code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = "▁"
SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
SCREAMING_SNAKE_CASE : Tuple = {
"facebook/xglm-564M": 2048,
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = VOCAB_FILES_NAMES
lowercase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self, lowerCamelCase, lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase = None, **lowerCamelCase, ) -> None:
"""simple docstring"""
_lowercase : str = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_lowercase : List[str] = 7
_lowercase : str = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words)]
_lowercase : List[str] = kwargs.get('additional_special_tokens', [])
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
_lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCamelCase))
_lowercase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowercase : int = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
_lowercase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_lowercase : Any = len(self.sp_model)
_lowercase : Optional[int] = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
self.fairseq_tokens_to_ids.update(lowerCamelCase)
_lowercase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self) -> Dict:
"""simple docstring"""
_lowercase : Any = self.__dict__.copy()
_lowercase : Optional[int] = None
_lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : List[Any] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
_lowercase : str = {}
_lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_lowercase : Union[str, Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase)
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase))
return [1] + ([0] * len(lowerCamelCase)) + [1, 1] + ([0] * len(lowerCamelCase))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]:
"""simple docstring"""
_lowercase : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a) * [0]
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = {self.convert_ids_to_tokens(lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase : Optional[Any] = self.sp_model.PieceToId(lowerCamelCase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
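# ids below the offset (and the madeup words) come from the fairseq table;
# everything else maps back into the spm vocab once the offset is removed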
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = ''.join(lowerCamelCase).replace(lowerCamelCase, ' ').strip()
return out_string
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : Optional[Any] = os.path.join(
lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
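# no vocab file on disk (e.g. the tokenizer was restored from a pickled
# state); fall back to dumping the in-memory sentencepiece model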
with open(lowerCamelCase, 'wb') as fi:
_lowercase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase)
return (out_vocab_file,)
| 21 |
"""simple docstring"""
from collections.abc import Sequence
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
lowercase : Tuple = 0 if allow_empty_subarrays else float('-inf' )
lowercase : Dict = 0.0
for num in arr:
lowercase : str = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase : Optional[Any] = max(_UpperCAmelCase , _UpperCAmelCase )
return max_sum
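# Illustrative trace: for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the Kadane
# recurrence curr_sum = max(num, curr_sum + num) peaks at 6, the sum of
# the subarray [4, -1, 2, 1].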
if __name__ == "__main__":
from doctest import testmod
testmod()
_UpperCamelCase: Optional[int] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 255 | 0 |
"""simple docstring"""
from math import pi, sqrt, tan
def _a ( SCREAMING_SNAKE_CASE_ : float ):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _a ( SCREAMING_SNAKE_CASE_ : float ):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def _a ( SCREAMING_SNAKE_CASE_ : float ):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
__lowerCAmelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(SCREAMING_SNAKE_CASE_ , 2 ) * torus_radius * tube_radius
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def _a ( SCREAMING_SNAKE_CASE_ : float ):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
__lowerCAmelCase = (sidea + sidea + sidea) / 2
__lowerCAmelCase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def _a ( SCREAMING_SNAKE_CASE_ : float ):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \\nequal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \\nlength of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 353 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCamelCase__ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank that is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, with the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
The CoNLL file parsing was developed by Leo Born.
"""
UpperCamelCase__ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in the evaluation if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Optional[int]="dummy_doc" ):
__lowerCAmelCase = {doc: key_lines}
__lowerCAmelCase = {doc: sys_lines}
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(SCREAMING_SNAKE_CASE_ , key_doc_lines[doc] , SCREAMING_SNAKE_CASE_ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowerCAmelCase = reader.set_annotated_parse_trees(SCREAMING_SNAKE_CASE_ , key_doc_lines[doc] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(SCREAMING_SNAKE_CASE_ , sys_doc_lines[doc] , SCREAMING_SNAKE_CASE_ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowerCAmelCase = reader.set_annotated_parse_trees(SCREAMING_SNAKE_CASE_ , key_doc_lines[doc] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if remove_nested:
__lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowerCAmelCase = reader.get_mention_assignments(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = reader.get_mention_assignments(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"Number of resulting singleton clusters in the key "
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"files, respectively" )
return doc_coref_infos
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
__lowerCAmelCase = get_coref_infos(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for name, metric in metrics:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = evaluator.evaluate_documents(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
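# the CoNLL score is the mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100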
__lowerCAmelCase = (conll / 3) * 1_00
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({"conll_score": conll} )
return output_scores
def _a ( SCREAMING_SNAKE_CASE_ : List[str] ):
__lowerCAmelCase = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
__lowerCAmelCase = line.split()[5]
if not parse_col == "-":
__lowerCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=True , _A=False , _A=False , _A=False ):
"""simple docstring"""
__lowerCAmelCase = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
__lowerCAmelCase = util.check_gold_parse_annotation(_A )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowerCAmelCase = evaluate(
key_lines=_A , sys_lines=_A , metrics=_A , NP_only=_A , remove_nested=_A , keep_singletons=_A , min_span=_A , )
return score
| 102 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : List[Any] = 16
__lowercase : Optional[int] = 32
def lowercase_ ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(_lowercase )
lowerCamelCase_ : Any = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase_ : Tuple = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCamelCase_ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCamelCase_ : Any = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
model.eval()
lowerCamelCase_ : int = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ : List[str] = model(**_lowercase )
lowerCamelCase_ : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCamelCase_, lowerCamelCase_ : Dict = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
lowerCamelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase_ : Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
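# distributed samplers pad the final batch so every process runs the same
# number of steps; the slicing above drops those padded duplicates before scoring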
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
lowerCamelCase_ : str = metric.compute()
return eval_metric["accuracy"]
def lowercase_ ( _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ : Any = config['''lr''']
lowerCamelCase_ : str = int(config['''num_epochs'''] )
lowerCamelCase_ : List[str] = int(config['''seed'''] )
lowerCamelCase_ : List[str] = int(config['''batch_size'''] )
lowerCamelCase_ : Dict = args.model_name_or_path
set_seed(_lowercase )
lowerCamelCase_, lowerCamelCase_ : str = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
lowerCamelCase_ : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase_ : Any = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCamelCase_ : Any = 1
lowerCamelCase_ : str = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase_ : Any = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
lowerCamelCase_ : int = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : int = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ : Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase_ : Dict = 0
lowerCamelCase_ : Any = evaluate.load('''glue''' , '''mrpc''' )
lowerCamelCase_ : int = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase_ : Optional[int] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase_ : Union[str, Any] = args.resume_from_checkpoint.split('''epoch_''' )[1]
lowerCamelCase_ : List[str] = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase_ : Optional[Any] = int(_lowercase ) + 1
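# e.g. resuming from a checkpoint folder named "epoch_3" (illustrative) makes training restart at epoch 4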
lowerCamelCase_ : List[Any] = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
lowerCamelCase_ : Union[str, Any] = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase_ : int = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
lowerCamelCase_ : Dict = model(**_lowercase )
lowerCamelCase_ : Optional[int] = outputs.loss
lowerCamelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase_ : Optional[Any] = F"""epoch_{epoch}"""
lowerCamelCase_ : Union[str, Any] = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
lowerCamelCase_ : str = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
lowerCamelCase_ : int = accuracy
lowerCamelCase_ : Dict = lr_scheduler.get_lr()[0]
lowerCamelCase_ : Optional[int] = optimizer.param_groups[0]['''lr''']
lowerCamelCase_ : List[str] = epoch
lowerCamelCase_ : str = overall_step
accelerator.print(F"""epoch {epoch}:""" , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
lowerCamelCase_ : List[Any] = parser.parse_args()
lowerCamelCase_ : List[str] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 318 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""GLPNFeatureExtractor"""]
lowerCamelCase__ = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 368 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , _UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__lowerCAmelCase : Tuple = dataset_size < in_memory_max_size
else:
__lowerCAmelCase : str = False
__lowerCAmelCase : Optional[int] = is_small_dataset(_UpperCamelCase )
assert result == expected
| 182 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
return int(x / 2**20 )
class __lowerCAmelCase :
"""simple docstring"""
def __enter__( self ) -> int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__lowerCamelCase = torch.cuda.memory_allocated()
return self
def __exit__( self , *lowerCamelCase__ ) -> Dict:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
__lowerCamelCase = torch.cuda.memory_allocated()
__lowerCamelCase = torch.cuda.max_memory_allocated()
__lowerCamelCase = bamb(self.end - self.begin )
__lowerCamelCase = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCamelCase_ ( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 , UpperCamelCase__ : str = "bert-base-cased" , UpperCamelCase__ : int = 320 , UpperCamelCase__ : int = 160 , ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
__lowerCamelCase = load_dataset(
'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
def tokenize_function(UpperCamelCase__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
__lowerCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config['lr']
__lowerCamelCase = int(config['num_epochs'] )
__lowerCamelCase = int(config['seed'] )
__lowerCamelCase = int(config['batch_size'] )
__lowerCamelCase = args.model_name_or_path
set_seed(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , )
else:
__lowerCamelCase = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
# Now we train the model
__lowerCamelCase = {}
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCamelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCamelCase__ , )
parser.add_argument(
'--output_dir' , type=UpperCamelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=UpperCamelCase__ , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=UpperCamelCase__ , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=UpperCamelCase__ , default=1 , help='Number of train epochs.' , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 90 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class snake_case__ ( enum.Enum):
a_ = 0
a_ = 1
a_ = 2
@add_end_docstrings(UpperCamelCase)
class snake_case__ ( UpperCamelCase):
a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]:
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : Dict = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params )
UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params}
def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = {}
if prefix is not None:
UpperCAmelCase_ : List[Any] = prefix
if prefix:
UpperCAmelCase_ : Tuple = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
''' [None, \'hole\']''' )
UpperCAmelCase_ : Union[str, Any] = handle_long_generation
preprocess_params.update(_A )
UpperCAmelCase_ : Optional[int] = generate_kwargs
UpperCAmelCase_ : Tuple = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : List[Any] = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase_ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict:
return super().__call__(_A , **_A )
def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : str = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens''']
else:
UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
''' model's max length''' )
UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]:
UpperCAmelCase_ : Any = model_inputs['''input_ids''']
UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = 1
else:
UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
UpperCAmelCase_ : Any = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0]
UpperCAmelCase_ : int = model_outputs['''input_ids''']
UpperCAmelCase_ : str = model_outputs['''prompt_text''']
UpperCAmelCase_ : Any = generated_sequence.numpy().tolist()
UpperCAmelCase_ : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : Any = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove the padded prompt from the decoded sequence when an XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ : List[str] = 0
else:
UpperCAmelCase_ : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Dict = text[prompt_length:]
UpperCAmelCase_ : List[str] = {'''generated_text''': all_text}
records.append(_A )
return records
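To make the preprocess/forward/postprocess split above concrete, a short usage sketch of the public pipeline API; the model name and generation settings are illustrative.

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
# return_full_text=False maps to ReturnType.NEW_TEXT above, so only the
# continuation is returned; handle_long_generation="hole" trims the prompt
# from the left when it would overflow the model's context window.
outputs = generator(
    "In 1991, the remains of",
    max_new_tokens=20,
    return_full_text=False,
    handle_long_generation="hole",
)
print(outputs[0]["generated_text"])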
| 304 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 'efficientnet'
def __init__( self , __snake_case = 3 , __snake_case = 6_0_0 , __snake_case = 2.0 , __snake_case = 3.1 , __snake_case = 8 , __snake_case = [3, 3, 5, 3, 5, 5, 3] , __snake_case = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __snake_case = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __snake_case = [] , __snake_case = [1, 2, 2, 2, 1, 2, 1] , __snake_case = [1, 2, 2, 3, 3, 4, 1] , __snake_case = [1, 6, 6, 6, 6, 6, 6] , __snake_case = 0.25 , __snake_case = "swish" , __snake_case = 2_5_6_0 , __snake_case = "mean" , __snake_case = 0.02 , __snake_case = 0.001 , __snake_case = 0.99 , __snake_case = 0.5 , __snake_case = 0.2 , **__snake_case , ):
super().__init__(**__snake_case )
snake_case = num_channels
snake_case = image_size
snake_case = width_coefficient
snake_case = depth_coefficient
snake_case = depth_divisor
snake_case = kernel_sizes
snake_case = in_channels
snake_case = out_channels
snake_case = depthwise_padding
snake_case = strides
snake_case = num_block_repeats
snake_case = expand_ratios
snake_case = squeeze_expansion_ratio
snake_case = hidden_act
snake_case = hidden_dim
snake_case = pooling_type
snake_case = initializer_range
snake_case = batch_norm_eps
snake_case = batch_norm_momentum
snake_case = dropout_rate
snake_case = drop_connect_rate
snake_case = sum(__snake_case ) * 4
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = version.parse('1.11' )
@property
def a_ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a_ ( self ):
return 1E-5
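As a worked illustration of how `width_coefficient` and `depth_divisor` interact in configs like the one above, here is the standard EfficientNet channel-rounding rule as a standalone sketch (not code from this file):

def round_filters(channels: int, width_coefficient: float = 2.0, depth_divisor: int = 8) -> int:
    # Scale by the width coefficient, then snap to the nearest multiple of depth_divisor.
    channels *= width_coefficient
    new_channels = max(depth_divisor, int(channels + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * channels:  # never round down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)

# With the EfficientNet-B7 width coefficient of 2.0: 32 -> 64 and 24 -> 48.
print(round_filters(32), round_filters(24))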
| 213 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=9_9 , __snake_case=6_4 , __snake_case=5 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=1_6 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
snake_case = vocab_size - 1
def a_ ( self ):
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = self.get_config()
return config, input_ids, input_mask, token_labels
def a_ ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.prepare_config_and_inputs()
snake_case = True
return config, input_ids, input_mask, token_labels
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = GPTNeoXModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = True
snake_case = GPTNeoXModel(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = GPTNeoXForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = GPTNeoXForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = GPTNeoXForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = GPTNeoXForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = True
snake_case = GPTNeoXForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
snake_case = model(__snake_case , attention_mask=__snake_case , use_cache=__snake_case )
snake_case = outputs.past_key_values
# create hypothetical multiple next tokens and extend them onto next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case = model(__snake_case , attention_mask=__snake_case , output_hidden_states=__snake_case )
snake_case = output_from_no_past['''hidden_states'''][0]
snake_case = model(
__snake_case , attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
def a_ ( self ):
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__magic_name__ = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a_ ( self ):
snake_case = GPTNeoXModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=6_4 , num_attention_heads=8 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
# This regression test was failing with PyTorch < 1.3
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case = None
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def a_ ( self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a_ ( self , __snake_case ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = ids_tensor([1, 1_0] , config.vocab_size )
snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case = GPTNeoXModel(__snake_case )
original_model.to(__snake_case )
original_model.eval()
snake_case = original_model(__snake_case ).last_hidden_state
snake_case = original_model(__snake_case ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case = {'''type''': scaling_type, '''factor''': 10.0}
snake_case = GPTNeoXModel(__snake_case )
scaled_model.to(__snake_case )
scaled_model.eval()
snake_case = scaled_model(__snake_case ).last_hidden_state
snake_case = scaled_model(__snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def a_ ( self ):
snake_case = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
snake_case = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__snake_case )
snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
snake_case = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=2_0 )
snake_case = tokenizer.batch_decode(__snake_case )[0]
self.assertEqual(__snake_case , __snake_case )
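The parameterized test above toggles linear vs. dynamic RoPE scaling; a minimal sketch of the corresponding user-facing configuration (the factor value is illustrative):

from transformers import GPTNeoXConfig

# "linear" interpolates all positions by the factor; "dynamic" only changes the
# embeddings once an input exceeds the original max_position_embeddings, which
# is why short inputs produce identical outputs in the dynamic case above.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})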
| 213 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[Any] = StableDiffusionXLImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
_snake_case : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_UpperCamelCase = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
_UpperCamelCase = CLIPTextModel(lowerCAmelCase__ )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCAmelCase__ )
_UpperCamelCase = CLIPTextModelWithProjection(lowerCAmelCase__ )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCAmelCase__ )
_UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=0 ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCamelCase = image / 2 + 0.5
if str(lowerCAmelCase__ ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCamelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def snake_case__ ( self : Dict ) -> str:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__ )
_UpperCamelCase = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase = sd_pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : Tuple ) -> Dict:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self : Dict ) -> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__ )
_UpperCamelCase = sd_pipe.to(lowerCAmelCase__ )
_UpperCamelCase = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# forward without prompt embeds
_UpperCamelCase = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase = 3 * ['''this is a negative prompt''']
_UpperCamelCase = negative_prompt
_UpperCamelCase = 3 * [inputs['''prompt''']]
_UpperCamelCase = sd_pipe(**lowerCAmelCase__ )
_UpperCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_UpperCamelCase = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase = 3 * ['''this is a negative prompt''']
_UpperCamelCase = 3 * [inputs.pop('''prompt''' )]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = sd_pipe.encode_prompt(lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
_UpperCamelCase = sd_pipe(
**lowerCAmelCase__ , prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , pooled_prompt_embeds=lowerCAmelCase__ , negative_pooled_prompt_embeds=lowerCAmelCase__ , )
_UpperCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str="cpu" , lowerCAmelCase__ : Any=torch.floataa , lowerCAmelCase__ : List[str]=0 ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_UpperCamelCase = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
_UpperCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase = self.get_inputs(lowerCAmelCase__ )
_UpperCamelCase = pipe(**lowerCAmelCase__ ).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
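For orientation, a usage sketch of the image-to-image pipeline exercised above (the class appears here under a mangled name; in current diffusers it is `StableDiffusionXLImg2ImgPipeline`). The checkpoint, image URL, and parameters are illustrative.

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("https://example.com/input.png")  # placeholder URL
# strength=0.75 (as in the test inputs above) keeps roughly a quarter of the
# original image's structure: 0.0 copies the input, 1.0 ignores it.
image = pipe("A painting of a squirrel eating a burger", image=init_image, strength=0.75).images[0]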
| 324 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : jnp.ndarray
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_snake_case : int = 3_2
_snake_case : int = 4
_snake_case : int = 4
_snake_case : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_snake_case : Union[bool, Tuple[bool]] = False
_snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_snake_case : int = 2
_snake_case : Union[int, Tuple[int]] = 8
_snake_case : Optional[Union[int, Tuple[int]]] = None
_snake_case : int = 1_2_8_0
_snake_case : float = 0.0
_snake_case : bool = False
_snake_case : jnp.dtype = jnp.floataa
_snake_case : bool = True
_snake_case : int = 0
_snake_case : bool = False
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict:
'''simple docstring'''
_UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa )
_UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ )
_UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"]
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.block_out_channels
_UpperCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype )
_UpperCamelCase = self.only_cross_attention
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCamelCase = []
_UpperCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = block_out_channels[i]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxDownBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = down_blocks
# mid
_UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_UpperCamelCase = []
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = list(reversed(lowerCAmelCase__ ) )
_UpperCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_UpperCamelCase = output_channel
_UpperCamelCase = reversed_block_out_channels[i]
_UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )]
_UpperCamelCase = i == len(lowerCAmelCase__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_UpperCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCamelCase = FlaxUpBlockaD(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCAmelCase__ )
_UpperCamelCase = output_channel
_UpperCamelCase = up_blocks
# out
_UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , jnp.ndarray ):
_UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 )
_UpperCamelCase = self.time_proj(lowerCAmelCase__ )
_UpperCamelCase = self.time_embedding(lowerCAmelCase__ )
# 2. pre-process
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) )
_UpperCamelCase = self.conv_in(lowerCAmelCase__ )
# 3. down
_UpperCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
else:
_UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_UpperCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCAmelCase__ , lowerCAmelCase__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_UpperCamelCase = new_down_block_res_samples
# 4. mid
_UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = up_block(
lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , )
else:
_UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train )
# 6. post-process
_UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ )
_UpperCamelCase = nn.silu(lowerCAmelCase__ )
_UpperCamelCase = self.conv_out(lowerCAmelCase__ )
_UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
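A minimal sketch of driving the Flax UNet above (in un-mangled diffusers source the module is `FlaxUNet2DConditionModel` and its first method is `init_weights`); the shapes follow the dummy tensors built in that method, and `model` is assumed to be an instance of the class.

import jax
import jax.numpy as jnp

rng = jax.random.PRNGKey(0)
params = model.init_weights(rng)  # builds zero/one dummy inputs internally, as above
sample = jnp.zeros((1, model.in_channels, model.sample_size, model.sample_size), dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 1, model.cross_attention_dim), dtype=jnp.float32)
out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # (1, out_channels, sample_size, sample_size)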
| 324 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def snake_case_ ( snake_case , snake_case , snake_case , snake_case ) -> Tuple:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case=True ) -> str:
model.train()
lowercase__: Any = model(snake_case )
lowercase__: str = F.mse_loss(snake_case , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case )
def snake_case_ ( snake_case , snake_case=False ) -> int:
set_seed(42 )
lowercase__: Optional[Any] = RegressionModel()
lowercase__: int = deepcopy(snake_case )
lowercase__: str = RegressionDataset(length=80 )
lowercase__: Dict = DataLoader(snake_case , batch_size=16 )
model.to(accelerator.device )
if sched:
lowercase__: Union[str, Any] = AdamW(params=model.parameters() , lr=1e-3 )
lowercase__: List[str] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
lowercase__: List[Any] = LambdaLR(snake_case , lr_lambda=lambda snake_case : epoch**0.6_5 )
lowercase__: Dict = LambdaLR(snake_case , lr_lambda=lambda snake_case : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__: List[Any] = accelerator.prepare(snake_case , snake_case , snake_case , snake_case )
else:
lowercase__ , lowercase__: Optional[int] = accelerator.prepare(snake_case , snake_case )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def snake_case_ ( snake_case ) -> List[Any]:
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__: Optional[int] = get_training_setup(snake_case )
# Use a single batch
lowercase__ , lowercase__: Dict = next(iter(snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__: str = accelerator.gather((ddp_input, ddp_target) )
lowercase__ , lowercase__: Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case , snake_case , snake_case , snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case ):
step_model(snake_case , snake_case , snake_case , snake_case )
else:
# Sync grads
step_model(snake_case , snake_case , snake_case , snake_case )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case , snake_case , snake_case , snake_case )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
lowercase__: Optional[int] = ddp_input[torch.randperm(len(snake_case ) )]
def snake_case_ ( snake_case ) -> Optional[Any]:
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__: int = get_training_setup(snake_case )
# Use a single batch
lowercase__ , lowercase__: str = next(iter(snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__: str = accelerator.gather((ddp_input, ddp_target) )
lowercase__ , lowercase__: Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case , snake_case , snake_case , snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case ):
step_model(snake_case , snake_case , snake_case , snake_case )
else:
# Sync grads
step_model(snake_case , snake_case , snake_case , snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
lowercase__: List[Any] = ddp_input[torch.randperm(len(snake_case ) )]
def snake_case_ ( snake_case=False , snake_case=False ) -> List[str]:
lowercase__: List[str] = Accelerator(
split_batches=snake_case , dispatch_batches=snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__: List[str] = get_training_setup(snake_case )
for iteration, batch in enumerate(snake_case ):
lowercase__ , lowercase__: Optional[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__: Dict = accelerator.gather((ddp_input, ddp_target) )
lowercase__ , lowercase__: Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case , snake_case , snake_case , snake_case , snake_case )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case ):
step_model(snake_case , snake_case , snake_case , snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
lowercase__: str = ddp_input[torch.randperm(len(snake_case ) )]
GradientState._reset_state()
def snake_case_ ( snake_case=False , snake_case=False ) -> Dict:
lowercase__: Union[str, Any] = Accelerator(
split_batches=snake_case , dispatch_batches=snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__: Dict = get_training_setup(snake_case , snake_case )
for iteration, batch in enumerate(snake_case ):
lowercase__ , lowercase__: Optional[int] = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__: str = accelerator.gather((ddp_input, ddp_target) )
lowercase__ , lowercase__: Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case , snake_case , snake_case , snake_case , snake_case )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case ):
step_model(snake_case , snake_case , snake_case , snake_case )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
lowercase__: Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case , snake_case , snake_case , snake_case )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def snake_case_ ( ) -> Optional[Any]:
lowercase__: Optional[int] = Accelerator()
lowercase__: Tuple = RegressionDataset(length=80 )
lowercase__: Any = DataLoader(snake_case , batch_size=16 )
lowercase__: Union[str, Any] = RegressionDataset(length=96 )
lowercase__: Union[str, Any] = DataLoader(snake_case , batch_size=16 )
lowercase__ , lowercase__: List[str] = accelerator.prepare(snake_case , snake_case )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case )
if iteration < len(snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case )
if batch_num < len(snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def snake_case_ ( ) -> int:
lowercase__: List[Any] = Accelerator()
lowercase__: Optional[int] = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(snake_case )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(snake_case )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(snake_case , snake_case )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case , snake_case )
def snake_case_ ( snake_case ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
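The tests above center on `Accelerator.accumulate`; here is a minimal self-contained sketch of the same pattern in user code (toy model and data, illustrative hyperparameters):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch, target in dataloader:
    # Gradients are synchronized only every 2nd step, or at the end of the
    # dataloader, exactly the condition the asserts above check for.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(batch), target)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()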
| 288 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of
    the two numbers that add up to `target`, or [] if no such pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
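The two-pointer approach above relies on `nums` being sorted; for unsorted input, a hash-map variant (a sketch, not part of the original file) achieves the same O(n) time:

def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    # Map each value to its index; check for the complement before inserting.
    seen: dict[int, int] = {}
    for j, value in enumerate(nums):
        i = seen.get(target - value)
        if i is not None:
            return [i, j]
        seen[value] = j
    return []

print(two_sum_unsorted([11, 2, 15, 7], 9))  # [1, 3]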
| 288 | 1 |
"""simple docstring"""
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of data-source shards implied by the list-valued gen_kwargs."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split `num_shards` shard indices into at most `max_num_jobs` contiguous groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # the first (num_shards % max_num_jobs) groups each receive one extra shard
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into up to `max_num_jobs` dicts, partitioning every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values of several gen_kwargs dicts."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle every list value of gen_kwargs, keeping same-length lists aligned."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
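A worked example of the distribution rule above: with 10 shards over 3 jobs, the first `10 % 3 = 1` group receives one extra shard.

# _distribute_shards(num_shards=10, max_num_jobs=3) returns:
#   [range(0, 4), range(4, 7), range(7, 10)]
# group 0 gets 10 // 3 + 1 = 4 shards; groups 1 and 2 get 3 each.
print(_distribute_shards(num_shards=10, max_num_jobs=3))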
| 136 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Quick primality check: trial division by small primes, then Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
        211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
        281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
        367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
        443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,
        523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683,
        691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
        787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
        971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime of the given bit size."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
| 136 | 1 |
"""simple docstring"""
def triangle_number_generator():
    """Yield successive triangular numbers n * (n + 1) / 2."""
    for n in range(1, 1_0_0_0_0_0_0):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number having more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_0_0)
if __name__ == "__main__":
print(solution())
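# Spot-check against the worked example from Project Euler problem 12: the
# seventh triangle number, 28, is the first one with more than five divisors.
assert count_divisors(28) == 6
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28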
| 350 | """simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps one shared temporary directory open and wipes it between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
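# Hypothetical usage of TempDirTestCase: every test starts from an empty
# shared directory, because setUp wipes it when `clear_on_setup` is True.
class ExampleTempDirTest(TempDirTestCase):
    def test_write_and_read(self):
        path = Path(self.tmpdir) / "example.txt"
        path.write_text("hello")
        self.assertEqual(path.read_text(), "hello")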
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        """Register mock patches and stop them automatically on cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Gather `tensor` from all processes and check that every copy is identical."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run `command` with subprocess.check_output, optionally returning decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
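# Minimal usage sketch for the subprocess helpers above, assuming the
# de-obfuscated names; both calls run a trivial Python one-liner.
if __name__ == "__main__":
    stdout = run_command(["python", "-c", "print('hello')"], return_stdout=True)
    result = execute_subprocess_async(["python", "-c", "print('world')"])
    print(stdout, result.returncode)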
| 149 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
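# A short usage sketch, assuming the de-obfuscated class names above: build a
# small config and inspect the ONNX export metadata ({0: "batch"} marks the
# batch dimension as dynamic).
if __name__ == "__main__":
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    onnx_config = MobileNetV1OnnxConfig(config)
    print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch'}}
    print(onnx_config.atol_for_validation)  # 0.0001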
| 60 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize the ResNet backbone config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into our DETR structure."""
    # load default config
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
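# Standalone illustration of the in_proj splitting done in read_in_q_k_v
# above: torch's MultiheadAttention stores q/k/v as one (3*d, d) matrix, and
# the converter slices it into three (d, d) projections. The dimension here
# is made up for the demo; DETR uses d = 256.
import torch as _torch

_d = 4
_in_proj_weight = _torch.randn(3 * _d, _d)
_q, _k, _v = _in_proj_weight[:_d, :], _in_proj_weight[_d : 2 * _d, :], _in_proj_weight[-_d:, :]
assert _q.shape == _k.shape == _v.shape == (_d, _d)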
| 188 | 0 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
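# Minimal illustration of the introspection pattern used above: read a config
# class's __init__ defaults with inspect.signature. `DummyConfig` is a
# stand-in defined here only for the demo.
import inspect as _inspect


class DummyConfig:
    def __init__(self, hidden_size=32, num_layers=2, **kwargs):
        pass


_defaults = {
    name: param.default
    for name, param in _inspect.signature(DummyConfig.__init__).parameters.items()
    if name not in ["self", "kwargs"]
}
assert _defaults == {"hidden_size": 32, "num_layers": 2}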
| 366 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
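# The corner-slice pattern used by the fast tests above, shown standalone:
# golden-value tests compare a small patch of the output image instead of the
# full array. The zero image here is made up for the demo.
import numpy as _np

_image = _np.zeros((1, 24, 24, 3))
_image_slice = _image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
assert _image_slice.shape == (3, 3)
assert _np.abs(_image_slice.flatten() - _np.zeros(9)).max() < 1e-2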
| 255 | 0 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load calibrated amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Force the Q/K/V quantizer scale factors to match, as needed by a fused single-GEMM QKV implementation."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the model's quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        line = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(line) <= line_width:
            logger.info(line)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a pattern in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
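# Standalone sketch of the qkv amax fusion above: take the max of the three
# calibration ranges and write it back into each quantizer's amax buffer, so
# a fused QKV GEMM can use a single scale. The values here are made up.
import torch as _torch

_q_amax, _k_amax, _v_amax = _torch.tensor(1.5), _torch.tensor(2.5), _torch.tensor(2.0)
_fused = max(_q_amax.item(), _k_amax.item(), _v_amax.item())
for _amax in (_q_amax, _k_amax, _v_amax):
    _amax.fill_(_fused)
assert _q_amax.item() == _k_amax.item() == _v_amax.item() == 2.5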
| 104 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the n-digit positive integers that are also an nth power (Project Euler problem 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(1 for power in powers for base in bases if len(str(base**power)) == power)
if __name__ == "__main__":
print(f'{solution(10, 22) = }')
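# Spot checks for problem 63: 16807 = 7**5 is a 5-digit fifth power and
# 134217728 = 8**9 is a 9-digit ninth power, so both are counted. The final
# assertion uses the published answer to the problem.
assert len(str(7**5)) == 5
assert len(str(8**9)) == 9
assert solution() == 49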
| 104 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
def lowerCamelCase__ ( self : Optional[int] , A : Any , A : Dict , A : Tuple , A : str ):
'''simple docstring'''
a : List[Any] = TFTransfoXLForSequenceClassification(A )
a : List[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self : Any ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids_a , input_ids_b , lm_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids_a}
        return config, inputs_dict
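# Editor's note: the checks above exercise Transfo-XL's segment-level recurrence.
# Each forward pass returns `mems` -- one cached hidden-state tensor of shape
# (mem_len, batch_size, hidden_size) per layer -- which can be fed back in to
# extend the effective context window. A minimal usage sketch (assumed API,
# mirroring the checks above, not part of the original test file):
#
#     hidden_states, mems = model(input_ids_a).to_tuple()        # first segment
#     outputs = model({"input_ids": input_ids_b, "mems": mems})  # second segment reuses the cache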
@require_tf
class TFTransfoXLModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip( self : Tuple , pipeline_test_casse_name : Dict , config_class : List[str] , model_architecture : Optional[Any] , tokenizer_name : Dict , processor_name : Optional[int] ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp( self : Optional[int] ):
        '''simple docstring'''
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=3_7 )
    def test_config( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_transfo_xl_model( self : Optional[int] ):
        '''simple docstring'''
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head( self : int ):
        '''simple docstring'''
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes( self : int ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode( self : Dict ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self : List[Any] ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
    def test_dataset_conversion( self : int ):
'''simple docstring'''
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
    def test_lm_generate_transfo_xl_wt103( self : List[Any] ):
'''simple docstring'''
        model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
        input_ids = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=2_0_0 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 360 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 186 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
| 139 |
'''simple docstring'''
def A_ ( snake_case = 100 ):
    collect_powers = set()
    current_pow = 0
    max_limit = snake_case + 1  # maximum limit
    for a in range(2 , max_limit ):
        for b in range(2 , max_limit ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
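# Editor's note: quick sanity check, using Project Euler 29's worked example --
# for n = 5 the terms a**b with 2 <= a, b <= 5 contain 15 distinct values
# (2**4 == 4**2 collide), so A_(5) == 15.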
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 139 | 1 |
'''simple docstring'''
def __magic_name__( number):
    if not isinstance(number, int):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
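# Editor's note: the loop applies the Catalan recurrence
# C(i) = C(i-1) * (4*i - 2) // (i + 1) starting from C(0) = 1, so the function
# returns the (number-1)-th Catalan number. Quick worked check:
#
#     assert [__magic_name__(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]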
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
    """simple docstring"""
    def __init__(self , base_distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean(self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance(self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev(self ):
        return self.variance.sqrt()
class ParameterProjection ( nn.Module ):
    """simple docstring"""
    def __init__(self , in_features , args_dim , domain_map , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward(self , x ):
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer ( nn.Module ):
    """simple docstring"""
    def __init__(self , function ):
        super().__init__()
        self.function = function
    def forward(self , x , *args ):
        return self.function(x , *args )
class DistributionOutput :
    """simple docstring"""
    distribution_class : type
    in_features : int
    args_dim : Dict[str, int]
    def __init__(self , dim = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution(self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution(self , distr_args , loc = None , scale = None , ):
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape(self ):
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim(self ):
        return len(self.event_shape )
    @property
    def value_in_support(self ):
        return 0.0
    def get_parameter_projection(self , in_features ):
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map(self , *args ):
        raise NotImplementedError()
    @staticmethod
    def squareplus(x ):
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
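# Editor's note: `squareplus` is a smooth map from R onto (0, inf), an
# alternative to softplus: f(x) = (x + sqrt(x**2 + 4)) / 2, with f(0) = 1 and
# f(x) ~ x for large positive x. A tiny sanity check (assumed usage, not part
# of the original file):
#
#     x = torch.tensor([-5.0, 0.0, 5.0])
#     y = DistributionOutput.squareplus(x)
#     assert (y > 0).all() and torch.isclose(y[1], torch.tensor(1.0))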
class StudentTOutput ( DistributionOutput ):
    """simple docstring"""
    args_dim : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class : type = StudentT
    @classmethod
    def domain_map(cls , df , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput ( DistributionOutput ):
    """simple docstring"""
    args_dim : Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class : type = Normal
    @classmethod
    def domain_map(cls , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput ( DistributionOutput ):
    """simple docstring"""
    args_dim : Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class : type = NegativeBinomial
    @classmethod
    def domain_map(cls , total_count , logits ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution(self , distr_args ):
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution(self , distr_args , loc = None , scale = None ):
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
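# Editor's note: for torch's NegativeBinomial(total_count=r, logits=l) the mean
# is r * exp(l), so adding log(scale) to the logits multiplies the mean by
# `scale` while leaving total_count untouched -- the Gamma rate-scaling
# property referenced in the comment above. Illustrative check (assumed, not in
# the original file):
#
#     d1 = NegativeBinomial(total_count=torch.tensor(5.0), logits=torch.tensor(0.0))
#     d2 = NegativeBinomial(total_count=torch.tensor(5.0), logits=torch.log(torch.tensor(2.0)))
#     assert torch.isclose(d2.mean, 2 * d1.mean)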
| 9 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__ ( unittest.TestCase):
    def setUp( self : List[Any] ):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut( self : Dict ):
        '''simple docstring'''
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_key( self : Dict ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_feature_extractor_key( self : int ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_config( self : List[str] ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop('''image_processor_type''' )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_image_processor_from_local_file( self : Tuple ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_repo_not_found( self : int ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''clip-base is not a local folder and is not a valid model identifier''' ):
            image_processor = AutoImageProcessor.from_pretrained('''clip-base''' )
    def test_revision_not_found( self : List[str] ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def test_image_processor_not_found( self : Dict ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_image_processor( self : List[Any] ):
        '''simple docstring'''
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def test_new_image_processor_registration( self : Optional[Any] ):
        '''simple docstring'''
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir )
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                    self.assertIsInstance(new_image_processor , CustomImageProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
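    # Editor's note: the registration pattern exercised above, as a standalone
    # sketch (CustomConfig/CustomImageProcessor come from the test fixtures):
    #
    #     AutoConfig.register("custom", CustomConfig)                      # model_type -> config class
    #     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)  # config class -> processor
    #     processor = AutoImageProcessor.from_pretrained(saved_dir)        # now resolves CustomImageProcessor
    #
    # The try/finally restores the global registries (their private
    # `_extra_content` dicts) so a failed registration cannot leak into other tests.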
    def test_from_pretrained_dynamic_image_processor_conflict( self : Any ):
        '''simple docstring'''
        class NewImageProcessor(CLIPImageProcessor ):
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 182 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
    model_type = '''bridgetower_vision_model'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig ( PretrainedConfig ):
    model_type = '''bridgetower_text_model'''
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
def _lowercase ( cls : List[str] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : List[Any] ) -> "PretrainedConfig":
_a , _a : Dict = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
if config_dict.get("""model_type""" ) == "bridgetower":
_a : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class BridgeTowerConfig ( PretrainedConfig ):
    model_type = '''bridgetower'''
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("""text_config_dict""" , None )
        _ = kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
    def from_text_vision_configs ( cls , text_config : BridgeTowerTextConfig , vision_config : BridgeTowerVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
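# Editor's note: typical composition under the definitions above (assumed
# usage, not part of the original file):
#
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.to_dict()["model_type"] == "bridgetower"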
| 324 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_snake_case = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key ( k ):
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
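# Editor's note: PATTERNS is applied left-to-right, so a (hypothetical) TF key
# "encoder/ffn/dense_1/kernel" becomes "encoder.ffn.dense_1.kernel" after the
# "/" -> "." rule, then "encoder.fc2.kernel" via "ffn.dense_1." -> "fc2.", and
# finally "encoder.fc2.weight" via "kernel" -> "weight".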
def convert_pegasus ( tf_weights , cfg_updates ):
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    mapping["""decoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
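# Editor's note: the `v = v.T` above is needed because TF stores dense kernels
# as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features); only "dense"/"proj" weights are transposed,
# embeddings and biases keep their layout.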
def lowerCAmelCase__ ( UpperCamelCase__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_a : List[Any] = tf.train.list_variables(UpperCamelCase__ )
_a : Optional[int] = {}
_a : Dict = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(UpperCamelCase__ , desc="""converting tf checkpoint to dict""" ):
_a : Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_a : str = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
_a : int = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch ( ckpt_path , save_dir ):
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]["""max_position_embeddings"""]
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd , Path(save_dir ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case = parser.parse_args()
if args.save_dir is None:
_snake_case = Path(args.tf_ckpt_path).parent.name
_snake_case = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 324 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 100_0000 ) ->int:
'''simple docstring'''
a : Dict = set(range(3 , _lowercase , 2 ) )
primes.add(2 )
for p in range(3 , _lowercase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _lowercase , _lowercase ) ) )
a : Dict = [float(_lowercase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowercase , limit + 1 , _lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
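# Editor's note: the sieve fills Euler's product formula
# phi(n) = n * prod_{p | n} (1 - 1/p); summing phi(2..limit) counts the reduced
# proper fractions with denominator <= limit (Project Euler 72). As a small
# check under that reading, solution(8) == 1 + 2 + 2 + 4 + 2 + 6 + 4 == 21.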
if __name__ == "__main__":
print(F'''{solution() = }''')
| 105 |
'''simple docstring'''
def snake_case_ ( input_str ):
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
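# Editor's note: the integer `bitmap` acts as an arbitrarily wide bit set --
# bit k flips to 1 once a character with ord(ch) == k has been seen, so a
# repeated character trips `bitmap >> ch_unicode & 1 == 1`. Example behaviour:
#
#     assert snake_case_("abc") is True
#     assert snake_case_("abca") is False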
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200 | 0 |
"""simple docstring"""
from PIL import Image
def change_brightness ( img , level ):
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
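# Editor's note: Image.point() maps every pixel value (per band) through
# `brightness`, so the transform reduces to c -> c + level, with PIL clipping
# the result to the 0-255 range; e.g. level=100 maps 50 -> 150 and 200 -> 255.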
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 12 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowerCAmelCase__ ( PipelineTool ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
    def encode( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
| 12 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
        self.assertTrue(hasattr(image_processing , '''do_pad''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
        masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        image_processing = DeformableDetrImageProcessor(format='''coco_panoptic''' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 8_2_2_8_7_3
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
| 127 |
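A minimal, self-contained sketch of the slice-verification pattern the image-processor tests above rely on; the helper name and the sample values are illustrative, not the processor's real reference outputs.

import torch

def check_slice(actual: torch.Tensor, expected: torch.Tensor, atol: float = 1e-4) -> None:
    # Mirror the torch.allclose assertions above: compare a small slice of the
    # processed output against hard-coded reference values within a tolerance.
    assert actual.shape == expected.shape, f"shape mismatch: {actual.shape} vs {expected.shape}"
    assert torch.allclose(actual, expected, atol=atol), (
        f"max abs diff {(actual - expected).abs().max().item():.6f} exceeds atol={atol}"
    )

check_slice(torch.tensor([0.2796, 0.3138, 0.3481]), torch.tensor([0.2796, 0.3138, 0.3481]))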
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=False ):
"""simple docstring"""
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ) and isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
snake_case = len(set_a.intersection(UpperCamelCase_ ) )
if alternative_union:
snake_case = len(UpperCamelCase_ ) + len(UpperCamelCase_ )
else:
snake_case = len(set_a.union(UpperCamelCase_ ) )
return intersection / union
if isinstance(UpperCamelCase_ ,(list, tuple) ) and isinstance(UpperCamelCase_ ,(list, tuple) ):
snake_case = [element for element in set_a if element in set_b]
if alternative_union:
snake_case = len(UpperCamelCase_ ) + len(UpperCamelCase_ )
return len(UpperCamelCase_ ) / union
else:
snake_case = set_a + [element for element in set_b if element not in set_a]
return len(UpperCamelCase_ ) / len(UpperCamelCase_ )
return None
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = {"a", "b", "c", "d", "e"}
_SCREAMING_SNAKE_CASE : List[str] = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 127 | 1 |
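A de-obfuscated sketch of the set branch of the Jaccard similarity above, using the module's own example inputs; |A ∩ B| = 3 and |A ∪ B| = 8, so the printed result is 0.375.

def jaccard_similarity(set_a: set, set_b: set) -> float:
    # |A ∩ B| / |A ∪ B|, as in the set branch above (default union).
    intersection = len(set_a.intersection(set_b))
    union = len(set_a.union(set_b))
    return intersection / union

assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375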
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """lxmert"""
__snake_case = {}
def __init__( self: Tuple , a: List[Any]=3_0522 , a: List[Any]=768 , a: str=12 , a: Tuple=9500 , a: int=1600 , a: Any=400 , a: Optional[int]=3072 , a: List[Any]="gelu" , a: List[str]=0.1 , a: Optional[int]=0.1 , a: int=512 , a: str=2 , a: Any=0.0_2 , a: List[Any]=1e-12 , a: Union[str, Any]=9 , a: int=5 , a: List[str]=5 , a: List[Any]=2048 , a: Optional[int]=4 , a: Any=6.6_7 , a: List[Any]=True , a: List[Any]=True , a: Optional[int]=True , a: Union[str, Any]=True , a: Optional[int]=True , a: List[str]=True , a: int=True , **a: Tuple , ):
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : str = hidden_size
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
__lowerCamelCase : int = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : int = initializer_range
__lowerCamelCase : List[Any] = layer_norm_eps
__lowerCamelCase : Tuple = num_qa_labels
__lowerCamelCase : Optional[int] = num_object_labels
__lowerCamelCase : List[Any] = num_attr_labels
__lowerCamelCase : Dict = l_layers
__lowerCamelCase : str = x_layers
__lowerCamelCase : Optional[Any] = r_layers
__lowerCamelCase : Optional[Any] = visual_feat_dim
__lowerCamelCase : List[str] = visual_pos_dim
__lowerCamelCase : int = visual_loss_normalizer
__lowerCamelCase : Any = task_matched
__lowerCamelCase : int = task_mask_lm
__lowerCamelCase : Optional[Any] = task_obj_predict
__lowerCamelCase : Optional[int] = task_qa
__lowerCamelCase : Optional[int] = visual_obj_loss
__lowerCamelCase : str = visual_attr_loss
__lowerCamelCase : str = visual_feat_loss
__lowerCamelCase : List[Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**a )
| 194 |
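A hedged usage sketch for the configuration class above, assuming the upstream transformers LxmertConfig; the three layer counts map onto the language, cross-modality and vision encoders.

from transformers import LxmertConfig

# Defaults mirror the obfuscated __init__ above: 9 language, 5 cross-modality
# and 5 visual layers.
config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.model_type)         # "lxmert"
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}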
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : List[str] = np.inf
def set_batch_size(SCREAMING_SNAKE_CASE__ ) -> None:
nonlocal batch_size
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Optional[Any] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and feature.dtype == "binary":
__lowerCamelCase : List[str] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return None if batch_size is np.inf else batch_size
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Tuple , a: NestedDataStructureLike[PathLike] , a: Optional[NamedSplit] = None , a: Optional[Features] = None , a: str = None , a: bool = False , a: bool = False , a: Optional[int] = None , **a: Optional[Any] , ):
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
__lowerCamelCase : List[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
__lowerCamelCase : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
__lowerCamelCase : List[str] = Parquet(
cache_dir=a , data_files=a , features=a , hash=a , **a , )
def _snake_case ( self: List[str] ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase : str = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase : str = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : int = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
__lowerCamelCase : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] , a: Dataset , a: Union[PathLike, BinaryIO] , a: Optional[int] = None , **a: List[Any] , ):
__lowerCamelCase : Optional[int] = dataset
__lowerCamelCase : List[Any] = path_or_buf
__lowerCamelCase : List[str] = batch_size or get_writer_batch_size(dataset.features )
__lowerCamelCase : List[Any] = parquet_writer_kwargs
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__lowerCamelCase : Optional[int] = self._write(file_obj=a , batch_size=a , **self.parquet_writer_kwargs )
else:
__lowerCamelCase : Any = self._write(file_obj=self.path_or_buf , batch_size=a , **self.parquet_writer_kwargs )
return written
def _snake_case ( self: Optional[int] , a: BinaryIO , a: int , **a: str ):
__lowerCamelCase : Dict = 0
__lowerCamelCase : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , a )
__lowerCamelCase : str = self.dataset.features.arrow_schema
__lowerCamelCase : Any = pq.ParquetWriter(a , schema=a , **a )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , a ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__lowerCamelCase : Any = query_table(
table=self.dataset._data , key=slice(a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(a )
written += batch.nbytes
writer.close()
return written
| 194 | 1 |
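A self-contained sketch of the batched writing loop inside the writer's _write method above, using plain pyarrow; the file path and batch size are illustrative.

import pyarrow as pa
import pyarrow.parquet as pq

def write_in_batches(table: pa.Table, path: str, batch_size: int = 4) -> int:
    # Slice the table into row batches and stream them through one ParquetWriter,
    # returning the number of bytes handed to the writer, as the method above does.
    written = 0
    with pq.ParquetWriter(path, schema=table.schema) as writer:
        for offset in range(0, len(table), batch_size):
            batch = table.slice(offset, batch_size)
            writer.write_table(batch)
            written += batch.nbytes
    return written

write_in_batches(pa.table({"idx": list(range(10))}), "/tmp/example.parquet")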
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def __a ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase_ = "xvjiarui/stable-diffusion-2-inpainting"
lowerCAmelCase_ , lowerCAmelCase_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase , safety_checker=_UpperCamelCase )
lowerCAmelCase_ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase_ = jax.random.PRNGKey(0 )
lowerCAmelCase_ = 50
lowerCAmelCase_ = jax.device_count()
lowerCAmelCase_ = num_samples * [prompt]
lowerCAmelCase_ = num_samples * [init_image]
lowerCAmelCase_ = num_samples * [mask_image]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = pipeline.prepare_inputs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# shard inputs and rng
lowerCAmelCase_ = replicate(_UpperCamelCase )
lowerCAmelCase_ = jax.random.split(_UpperCamelCase , jax.device_count() )
lowerCAmelCase_ = shard(_UpperCamelCase )
lowerCAmelCase_ = shard(_UpperCamelCase )
lowerCAmelCase_ = shard(_UpperCamelCase )
lowerCAmelCase_ = pipeline(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , jit=_UpperCamelCase )
lowerCAmelCase_ = output.images.reshape(_UpperCamelCase , 512 , 512 , 3 )
lowerCAmelCase_ = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 231 |
from pathlib import Path
import fire
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int ):
"""simple docstring"""
lowerCAmelCase_ = Path(__lowerCAmelCase )
lowerCAmelCase_ = Path(__lowerCAmelCase )
dest_dir.mkdir(exist_ok=__lowerCAmelCase )
for path in src_dir.iterdir():
lowerCAmelCase_ = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowerCAmelCase_ = dest_dir.joinpath(path.name )
print(__lowerCAmelCase )
dest_path.open("w" ).write("\n".join(__lowerCAmelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
| 231 | 1 |
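A hedged sketch of the per-file step the minify helper above performs, plus its fire-based command line; the paths are illustrative.

from pathlib import Path

def head_of_file(path: Path, n: int) -> str:
    # Same truncation as minify(): keep the first n lines, right-stripped.
    return "\n".join(x.rstrip() for x in path.open().readlines()[:n])

# fire.Fire(minify) exposes the function as a CLI, so the invocation is:
#   python minify.py src_dir/ dest_dir/ 5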
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( snake_case_ : list[float] ) -> float:
"""simple docstring"""
_lowerCAmelCase = 0.0_0
_lowerCAmelCase = 0
for resistor in resistors:
if resistor <= 0:
_lowerCAmelCase = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(snake_case_ )
first_sum += 1 / float(snake_case_ )
index += 1
return 1 / first_sum
def __UpperCAmelCase ( snake_case_ : list[float] ) -> float:
"""simple docstring"""
_lowerCAmelCase = 0.0_0
_lowerCAmelCase = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowerCAmelCase = F"""Resistor at index {index} has a negative value!"""
raise ValueError(snake_case_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
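The two functions above compute the equivalent resistance of parallel and series resistor combinations, but the obfuscation gives them one shared name; here is a self-contained, de-obfuscated sketch with illustrative values.

from __future__ import annotations

def resistor_parallel(resistors: list[float]) -> float:
    # 1 / (1/R1 + 1/R2 + ...); zero or negative values are rejected, as above.
    first_sum = 0.0
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        first_sum += 1 / resistor
    return 1 / first_sum

def resistor_series(resistors: list[float]) -> float:
    # R1 + R2 + ...; negative values are rejected, as above.
    total = 0.0
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!")
        total += resistor
    return total

assert abs(resistor_parallel([3.0, 6.0]) - 2.0) < 1e-12   # 1 / (1/3 + 1/6)
assert resistor_series([3.0, 6.0]) == 9.0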
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
image=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 317 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
SCREAMING_SNAKE_CASE_ : str = {value: key for key, value in encode_dict.items()}
def _snake_case ( UpperCAmelCase_ : str ):
A__ = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def _snake_case ( UpperCAmelCase_ : str ):
if set(UpperCAmelCase_ ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
A__ = """"""
for word in coded.split():
while len(UpperCAmelCase_ ) != 0:
decoded += decode_dict[word[:5]]
A__ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 335 |
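A round-trip sketch of the Baconian cipher above, using a three-letter subset of the translation table; the codes are copied from encode_dict, and the function names are illustrative because the two obfuscated definitions reuse one identifier.

encode_dict = {"a": "AAAAA", "b": "AAAAB", "c": "AAABA", " ": " "}
decode_dict = {v: k for k, v in encode_dict.items()}

def encode(word: str) -> str:
    # Concatenate the five-letter A/B code of each character.
    return "".join(encode_dict[ch] for ch in word.lower())

def decode(coded: str) -> str:
    # Split on spaces, then decode each word five characters at a time.
    out = []
    for word in coded.split():
        out.append("".join(decode_dict[word[i : i + 5]] for i in range(0, len(word), 5)))
    return " ".join(out)

assert encode("cab") == "AAABAAAAAAAAAAB"
assert decode(encode("cab")) == "cab"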
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 1 |
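A self-contained sketch of the backtracking path counter above (the obfuscated definition and its recursive calls use different names, so this version is renamed for clarity); on a fully open 2x2 grid there are exactly two simple paths from (0, 0) to (1, 1).

from __future__ import annotations

def count_paths(grid: list[list[int]], row: int = 0, col: int = 0, visit: set | None = None) -> int:
    # Count simple 4-directional paths from (0, 0) to the bottom-right cell,
    # skipping blocked cells (value 1) and already-visited cells, as above.
    visit = set() if visit is None else visit
    rows, cols = len(grid), len(grid[0])
    if not (0 <= row < rows and 0 <= col < cols) or (row, col) in visit or grid[row][col] == 1:
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visit.add((row, col))
    count = sum(
        count_paths(grid, row + dr, col + dc, visit)
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1))
    )
    visit.remove((row, col))
    return count

assert count_paths([[0, 0], [0, 0]]) == 2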
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
snake_case_ = len(__lowerCAmelCase )
snake_case_ = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
snake_case_ = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
snake_case_ = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
snake_case_ = subset[i - 1][j]
if arr[i - 1] <= j:
snake_case_ = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
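A usage sketch for the bottom-up subset-sum table above; the function name is illustrative since the definition is obfuscated, and dp[i][j] is True when some subset of the first i values sums to j.

from __future__ import annotations

def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    # Same DP as above: extend row by row, either skipping or taking arr[i - 1].
    n = len(arr)
    dp = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always reaches sum 0
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            dp[i][j] = dp[i - 1][j] or (arr[i - 1] <= j and dp[i - 1][j - arr[i - 1]])
    return dp[n][required_sum]

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True    # 4 + 5
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False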
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : int = 32
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = "bert-base-cased" ) -> Optional[Any]:
snake_case_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
snake_case_ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
# Initialize accelerator
snake_case_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config["""lr"""]
snake_case_ = int(config["""num_epochs"""] )
snake_case_ = int(config["""seed"""] )
snake_case_ = int(config["""batch_size"""] )
snake_case_ = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
snake_case_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case_ = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case_ = 1
snake_case_ = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case_ = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
snake_case_ = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
snake_case_ = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case_ = 0
# Now we train the model
snake_case_ = evaluate.load("""glue""" , """mrpc""" )
snake_case_ = 0
snake_case_ = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.loss
snake_case_ = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case_ = 0
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case_ , snake_case_ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_SCREAMING_SNAKE_CASE ) - 1:
snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE )
snake_case_ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
snake_case_ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( ) -> int:
snake_case_ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=_SCREAMING_SNAKE_CASE , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
"""--output_dir""" , type=_SCREAMING_SNAKE_CASE , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=3 , help="""Number of train epochs.""" , )
snake_case_ = parser.parse_args()
snake_case_ = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 233 | 0 |
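A minimal sketch of the duplicate-trimming step in the evaluation loop above: distributed samplers pad the last batch so every process gets equal work, and the gathered predictions must be truncated back to the true dataset length. The shapes are illustrative.

import torch

def trim_padded_gather(gathered: torch.Tensor, dataset_len: int, samples_seen: int) -> torch.Tensor:
    # After accelerator.gather, the final batch may contain duplicated samples
    # added to even out the shards; keep only the genuinely new ones.
    return gathered[: dataset_len - samples_seen]

preds = torch.arange(8)  # gathered predictions for the last batch
print(trim_padded_gather(preds, dataset_len=102, samples_seen=96))  # keeps 6 of 8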
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Dict:
__a = int(lowerCAmelCase__ )
if n_element < 1:
__a = ValueError('''a should be a positive number''' )
raise my_error
__a = [1]
__a = (0, 0, 0)
__a = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowercase_ = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
lowercase_ = hamming(int(n))
print("-----------------------------------------------------")
print(F'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 45 |
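The first ten Hamming (5-smooth) numbers are fixed, so the generator above can be checked against them directly; this is a heap-based variant of the three-pointer merge, written self-contained because the definition above is obfuscated.

import heapq

def hamming(n: int) -> list:
    # Repeatedly pop the smallest 5-smooth number and push its 2x, 3x, 5x multiples.
    heap, seen, out = [1], {1}, []
    while len(out) < n:
        value = heapq.heappop(heap)
        out.append(value)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return out

assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]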
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
UpperCamelCase = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
UpperCamelCase = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Any = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE )[0]
@deprecated(SCREAMING_SNAKE_CASE , '''Please use tf.data to implement this functionality.''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE ) as bytestream:
A_ : Union[str, Any] = _readaa(SCREAMING_SNAKE_CASE )
if magic != 2_051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
A_ : str = _readaa(SCREAMING_SNAKE_CASE )
A_ : Tuple = _readaa(SCREAMING_SNAKE_CASE )
A_ : Dict = _readaa(SCREAMING_SNAKE_CASE )
A_ : Tuple = bytestream.read(rows * cols * num_images )
A_ : Dict = numpy.frombuffer(SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
A_ : Union[str, Any] = data.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE , '''Please use tf.one_hot on tensors.''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = labels_dense.shape[0]
A_ : List[str] = numpy.arange(SCREAMING_SNAKE_CASE ) * num_classes
A_ : int = numpy.zeros((num_labels, num_classes) )
A_ : Tuple = 1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE , '''Please use tf.data to implement this functionality.''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE ) as bytestream:
A_ : Any = _readaa(SCREAMING_SNAKE_CASE )
if magic != 2_049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
A_ : Tuple = _readaa(SCREAMING_SNAKE_CASE )
A_ : List[Any] = bytestream.read(SCREAMING_SNAKE_CASE )
A_ : int = numpy.frombuffer(SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return labels
class _lowerCamelCase :
"""simple docstring"""
@deprecated(
_SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=dtypes.floataa , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , )->Tuple:
'''simple docstring'''
A_ , A_ : List[str] = random_seed.get_seed(_SCREAMING_SNAKE_CASE )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
A_ : Tuple = dtypes.as_dtype(_SCREAMING_SNAKE_CASE ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
A_ : Optional[Any] = 1_0000
A_ : List[str] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
A_ : List[str] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
A_ : int = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
A_ : Optional[int] = images.astype(numpy.floataa )
A_ : List[str] = numpy.multiply(_SCREAMING_SNAKE_CASE , 1.0 / 2_5_5.0 )
A_ : int = images
A_ : Optional[int] = labels
A_ : List[str] = 0
A_ : List[Any] = 0
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return self._images
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
return self._labels
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return self._num_examples
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return self._epochs_completed
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True )->str:
'''simple docstring'''
if fake_data:
A_ : Any = [1] * 784
A_ : int = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_SCREAMING_SNAKE_CASE )],
[fake_label for _ in range(_SCREAMING_SNAKE_CASE )],
)
A_ : Any = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
A_ : Any = numpy.arange(self._num_examples )
numpy.random.shuffle(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = self.images[perma]
A_ : List[Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
A_ : Tuple = self._num_examples - start
A_ : Union[str, Any] = self._images[start : self._num_examples]
A_ : List[Any] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
A_ : List[Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(_SCREAMING_SNAKE_CASE )
A_ : Tuple = self.images[perm]
A_ : int = self.labels[perm]
# Start next epoch
A_ : Tuple = 0
A_ : Optional[Any] = batch_size - rest_num_examples
A_ : Tuple = self._index_in_epoch
A_ : Union[str, Any] = self._images[start:end]
A_ : Any = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
A_ : List[str] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE , '''Please write your own downloading logic.''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if not gfile.Exists(SCREAMING_SNAKE_CASE ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE )
A_ : str = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not gfile.Exists(SCREAMING_SNAKE_CASE ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE ) as f:
A_ : Dict = f.size()
print('''Successfully downloaded''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''bytes.''' )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=dtypes.floataa , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=5_000 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , seed=SCREAMING_SNAKE_CASE )
A_ : List[str] = fake()
A_ : Tuple = fake()
A_ : Union[str, Any] = fake()
return _Datasets(train=SCREAMING_SNAKE_CASE , validation=SCREAMING_SNAKE_CASE , test=SCREAMING_SNAKE_CASE )
if not source_url: # empty string check
A_ : List[str] = DEFAULT_SOURCE_URL
A_ : List[Any] = '''train-images-idx3-ubyte.gz'''
A_ : Tuple = '''train-labels-idx1-ubyte.gz'''
A_ : Optional[int] = '''t10k-images-idx3-ubyte.gz'''
A_ : Any = '''t10k-labels-idx1-ubyte.gz'''
A_ : Dict = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : Optional[int] = _extract_images(SCREAMING_SNAKE_CASE )
A_ : List[str] = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : Tuple = _extract_labels(SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE )
A_ : Dict = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : List[str] = _extract_images(SCREAMING_SNAKE_CASE )
A_ : int = _maybe_download(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : Any = _extract_labels(SCREAMING_SNAKE_CASE , one_hot=SCREAMING_SNAKE_CASE )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE ):
A_ : str = (
'''Validation size should be between 0 and '''
f'''{len(SCREAMING_SNAKE_CASE )}. Received: {validation_size}.'''
)
raise ValueError(SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = train_images[:validation_size]
A_ : Optional[Any] = train_labels[:validation_size]
A_ : Any = train_images[validation_size:]
A_ : Any = train_labels[validation_size:]
A_ : Optional[Any] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
A_ : List[str] = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A_ : Dict = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A_ : Dict = _DataSet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return _Datasets(train=SCREAMING_SNAKE_CASE , validation=SCREAMING_SNAKE_CASE , test=SCREAMING_SNAKE_CASE )
| 186 | 0 |
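A small sketch of the dense-to-one-hot conversion above; the obfuscation dropped the left-hand side of the flat-index assignment, so this restores the standard TensorFlow-tutorial form, where row i gets a 1 at column labels_dense[i].

import numpy

def dense_to_one_hot(labels_dense: numpy.ndarray, num_classes: int) -> numpy.ndarray:
    # index_offset flattens (row, column) pairs into positions in the 1-D view.
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

print(dense_to_one_hot(numpy.array([1, 0, 2]), num_classes=3))
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]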
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : Union[str, Any] ):
_snake_case = tempfile.mkdtemp()
# fmt: off
_snake_case = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
_snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
_snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
_snake_case = {'''unk_token''': '''<unk>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
_snake_case = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_snake_case = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Optional[Any] , **_lowerCamelCase : List[str] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def lowercase ( self : Any , **_lowerCamelCase : List[str] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def lowercase ( self : str , **_lowerCamelCase : Any ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowercase ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Dict ):
_snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : List[Any] ):
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = self.get_image_processor()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_snake_case = self.get_image_processor(do_normalize=_lowerCamelCase )
_snake_case = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def lowercase ( self : Dict ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = self.prepare_image_inputs()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' )
_snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Optional[Any] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = processor(text=_lowerCamelCase , return_tensors='''np''' )
_snake_case = tokenizer(_lowerCamelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowercase ( self : Optional[Any] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = self.prepare_image_inputs()
_snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : Optional[int] ):
_snake_case = '''google/owlvit-base-patch32'''
_snake_case = OwlViTProcessor.from_pretrained(_lowerCamelCase )
_snake_case = ['''cat''', '''nasa badge''']
_snake_case = processor(text=_lowerCamelCase )
_snake_case = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : Optional[int] ):
_snake_case = '''google/owlvit-base-patch32'''
_snake_case = OwlViTProcessor.from_pretrained(_lowerCamelCase )
_snake_case = [['''cat''', '''nasa badge'''], ['''person''']]
_snake_case = processor(text=_lowerCamelCase )
_snake_case = 16
_snake_case = len(_lowerCamelCase )
_snake_case = max([len(_lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : Union[str, Any] ):
_snake_case = '''google/owlvit-base-patch32'''
_snake_case = OwlViTProcessor.from_pretrained(_lowerCamelCase )
_snake_case = ['''cat''', '''nasa badge''']
_snake_case = processor(text=_lowerCamelCase )
_snake_case = 16
_snake_case = inputs['''input_ids''']
_snake_case = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowercase ( self : List[str] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = self.prepare_image_inputs()
_snake_case = self.prepare_image_inputs()
_snake_case = processor(images=_lowerCamelCase , query_images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : str ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case = processor.batch_decode(_lowerCamelCase )
_snake_case = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 371 |
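A small sketch of the shape arithmetic the nested-query test above asserts: the processor pads each image's query list to the longest one, so input_ids is flattened to (batch_size * num_max_text_queries, seq_length).

input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)                             # 2 images
num_max_text_queries = max(len(t) for t in input_texts)   # 2 queries after padding
seq_length = 16                                           # text context length used in the test
assert (batch_size * num_max_text_queries, seq_length) == (4, 16)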
"""simple docstring"""
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, 'src', 'diffusers')
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : Any ):
_snake_case = find_backend(''' if not is_torch_available():''' )
self.assertEqual(_lowerCamelCase , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(_lowerCamelCase , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(_lowerCamelCase , '''torch_and_transformers_and_onnx''' )
def lowercase ( self : List[str] ):
_snake_case = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , _lowerCamelCase )
self.assertIn('''torch_and_transformers''' , _lowerCamelCase )
self.assertIn('''flax_and_transformers''' , _lowerCamelCase )
self.assertIn('''torch_and_transformers_and_onnx''' , _lowerCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def lowercase ( self : List[str] ):
_snake_case = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_lowerCamelCase , '''\nCONSTANT = None\n''' )
_snake_case = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_lowerCamelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
_snake_case = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
_snake_case = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : str ):
_snake_case = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
_snake_case = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , _lowerCamelCase )
| 40 | 0 |
def _UpperCamelCase ( lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase__ )
if number < 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(lowercase__ )
__SCREAMING_SNAKE_CASE : int = 1
for i in range(1 , lowercase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
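A usage sketch for the Catalan-number routine above; with its 1-indexed convention the fifth call returns C_4 = 14. The function name is illustrative, matching the obfuscated definition's intent.

def catalan(number: int) -> int:
    # Iterative form of C_n = C_(n-1) * (4n - 2) / (n + 1), as above; the
    # recurrence guarantees the floor division is exact.
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)
    return current

assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]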
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=False ):
if isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = len(set_a.intersection(lowercase__ ) )
if alternative_union:
__SCREAMING_SNAKE_CASE : int = len(lowercase__ ) + len(lowercase__ )
else:
__SCREAMING_SNAKE_CASE : int = len(set_a.union(lowercase__ ) )
return intersection / union
if isinstance(lowercase__ , (list, tuple) ) and isinstance(lowercase__ , (list, tuple) ):
__SCREAMING_SNAKE_CASE : Dict = [element for element in set_a if element in set_b]
if alternative_union:
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ ) + len(lowercase__ )
return len(lowercase__ ) / union
else:
__SCREAMING_SNAKE_CASE : Tuple = set_a + [element for element in set_b if element not in set_a]
return len(lowercase__ ) / len(lowercase__ )
return None
if __name__ == "__main__":
__lowerCAmelCase : List[Any] ={'a', 'b', 'c', 'd', 'e'}
__lowerCAmelCase : Optional[Any] ={'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 9 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
a : Any = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a : int = spec.loader.load_module()
a : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a : str = re.compile('\[(.+?)\]\((https://huggingface\.co/.+?)\)')
a : str = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def __magic_name__ ( ) -> Any:
'''simple docstring'''
snake_case_ = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case_ = False
# source code of `config_class`
snake_case_ = inspect.getsource(__UpperCAmelCase )
snake_case_ = _re_checkpoint.findall(__UpperCAmelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case_ ,snake_case_ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
snake_case_ = True
break
snake_case_ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
snake_case_ = '''\n'''.join(sorted(__UpperCAmelCase ) )
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 72 |
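A quick demonstration of the checkpoint regex above against the docstring pattern it targets; the example string comes from the script's own comment.

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
found = _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
assert found == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]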
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a ( unittest.TestCase ):
def A_ ( self : List[Any] ):
snake_case_ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
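    # Added note (mine, not part of the upstream test suite): the invariant these
    # cases pin down is that is_safetensors_compatible(filenames, variant=...)
    # returns True only when every ".bin" weight file has a ".safetensors"
    # counterpart; with a variant such as "fp16", either the variant or the
    # non-variant safetensors file is assumed to satisfy the requirement
    # (see the *_variant_partial cases above).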
| 72 | 1 |
"""simple docstring"""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
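
# Worked example (added illustration): with input_ids=[[7, 8, 9]], pad_token_id=0
# and decoder_start_token_id=5, shift_tokens_right returns [[5, 7, 8]]: the ids
# are shifted one position right with the start token prepended, and any -100
# label sentinel copied over is replaced by the pad token.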

class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 242 |
"""simple docstring"""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian (equal to its own conjugate transpose)."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
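
# Math note (added): for a Hermitian matrix A and nonzero vector v, the Rayleigh
# quotient R(A, v) = (v* A v) / (v* v) is always real, and it is bounded below
# and above by the smallest and largest eigenvalues of A.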
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 242 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase =False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
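
# Note (added): checking a small 3x3 corner slice of the output against hardcoded
# values is the usual diffusers integration-test pattern; the 1e-2 tolerance
# absorbs minor nondeterminism across GPU kernels and library versions.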
| 242 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase =logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing (optionally learned) text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)

class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Truncates `log_p_x_0` so that only the most likely classes whose cumulative
        probability stays below `truncation_rate` are kept; the rest are set to log(0)."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
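
    # Note (added): `truncate` implements truncation sampling over the discrete VQ
    # codebook: per latent pixel, only the most probable classes whose cumulative
    # probability stays below `truncation_rate` are kept (plus always the argmax),
    # and the rest are zeroed out by setting their log-probabilities to -inf.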
| 242 | 1 |
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transform/operation that changes the brightness of a pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)
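
# Note (added): the transform above algebraically reduces to c + level; PIL's
# Image.point applies it to every band value and, for 8-bit modes, clamps the
# result to the valid [0, 255] range.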
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 12 |
def odd_even_transposition(arr: list) -> list:
    """Sort a list using the odd-even transposition (brick) sort algorithm."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
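
# Note (added): odd-even transposition sort is a variant of bubble sort that
# alternates between odd and even index pairs; it runs in O(n^2) sequentially,
# but the comparisons within each phase are independent, which makes the
# algorithm easy to parallelise across n processors.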
| 12 | 1 |
"""simple docstring"""
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Return the least cuboid size M such that the number of cuboids with an
    integer-length shortest surface path exceeds `limit` (Project Euler 86)."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
    print(f"{solution() = }")
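
# Note (added): the counting term follows from the geometry. For a cuboid
# a x b x M with a <= b <= M, the shortest surface path has length
# sqrt((a + b)^2 + M^2), so for a fixed s = a + b the number of valid (a, b)
# pairs is min(M, s // 2) - max(1, s - M) + 1, exactly the increment used above.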
| 181 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_dynamic_config_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
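
# Added illustration (not upstream code): the registration pattern exercised above
# boils down to
#     AutoConfig.register("custom", CustomConfig)
#     config = AutoConfig.from_pretrained(path_to_saved_custom_config)
# after which `config` is an instance of CustomConfig rather than a built-in class.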
| 181 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Find the minimum change from the given denominations and value."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array

    return answer
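
# Worked example (added illustration): find_minimum_change([1, 2, 5, 10], "18")
# returns [10, 5, 2, 1]. Note the greedy strategy is optimal for canonical coin
# systems like the one below, but can be suboptimal for arbitrary denominations.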
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 30 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
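
# Example usage (added illustration): a loader that accepts one path, a list of
# paths, or a mapping of split names to paths could be annotated as
#     def load(paths: NestedDataStructureLike[PathLike]) -> None: ...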
| 290 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
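
    # Note (added): selecting teacher layers [0, 2, 4, 7, 9, 11] maps a 12-layer
    # model onto a 6-layer student, so `std_idx` ends at 6 and the student keeps
    # roughly half the transformer parameters while reusing the full embeddings
    # and LM head of the teacher.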
| 139 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
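
# Note (added): the actual per-architecture test methods come from
# FlaxModelTesterMixin, which iterates over `all_model_classes` and builds
# configs/inputs through the tester's prepare_config_and_inputs_for_common(),
# so this file only defines the tester and a slow from_pretrained smoke test.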
| 139 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of the SDE
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
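
    # Note (added): this scheduler implements the predictor-corrector sampling of
    # Song et al.'s score-based SDE paper: `step_pred` is a reverse-SDE (variance
    # exploding) Euler-Maruyama predictor, and `step_correct` is a Langevin
    # corrector whose step size 2 * (snr * ||z|| / ||score||)^2 follows the
    # paper's signal-to-noise heuristic.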
| 165 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
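
# Hedged usage sketch (added; assumes the standard transformers auto-API):
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted_label = model(**inputs).logits.argmax(-1).item()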
| 317 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
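
# Worked example (added illustration): for the line
#     "if not is_torch_available() and not is_tf_available():"
# find_backend collects ["torch", "tf"], sorts them, and returns "tf_and_torch".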
def parse_init(__UpperCAmelCase):
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase__: str = f.readlines()
lowercase__: List[Any] = 0
while line_index < len(__UpperCAmelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase__: Optional[Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase__: str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCAmelCase ):
lowercase__: Any = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0]
lowercase__: List[str] = re.findall(R'''\[([^\]]+)\]''' , __UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase__: str = _re_import_struct_key_value.search(__UpperCAmelCase )
if single_line_import_search is not None:
lowercase__: Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase__: Dict = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase__: Union[str, Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__: Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__: int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase__: int = lines[line_index]
if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None:
lowercase__: Dict = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(''', ''' )
lowercase__: Tuple = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_between_brackets.search(__UpperCAmelCase ) is not None:
lowercase__: Optional[Any] = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(''', ''' )
lowercase__: Any = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_quote_object.search(__UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 1_2 + '''"''' ):
objects.append(line[1_3:-3] )
line_index += 1
lowercase__: Dict = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase__: List[str] = []
while (
line_index < len(__UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase__: List[str] = lines[line_index]
lowercase__: int = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase__: Any = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase__: int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__: str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__: Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase__: List[Any] = lines[line_index]
lowercase__: Optional[Any] = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
lowercase__: Union[str, Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    # PATH_TO_TRANSFORMERS is defined near the top of the original script.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
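A quick illustration of what find_backend extracts from an init line. This is a hypothetical driver, not part of the script: the _re_test_backend and _re_backend patterns are defined earlier in the original file and are assumed here to match `is_xxx_available()` guards, as in the upstream check_inits.py.

# Assumed: _re_test_backend / _re_backend from the top of the original script.
sample = "    if not (is_torch_available() and is_vision_available()):\n"
print(find_backend(sample))  # expected: "torch_and_vision"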
| 2 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16,
                 resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02,
                 use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs) | 2 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
                 position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True,
                 enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910,
                 shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 53 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_characters FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    # Minimal implementation for the original "Put your code here..." stub.
    return "".join(secrets.choice(digits) for _ in range(i))


def random_letters(chars_incl, i):
    # Minimal implementation for the original "Put your code here..." stub.
    return "".join(secrets.choice(ascii_letters) for _ in range(i))


def random_characters(chars_incl, i):
    # Minimal implementation for the original "Put your code here..." stub.
    return "".join(secrets.choice(punctuation) for _ in range(i))


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main() | 233 | 0 |
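An example run of the helpers above (output varies between runs, since secrets draws fresh randomness):

pwd = password_generator(12)
print(pwd, is_strong_password(pwd))
print(alternative_password_generator("AB1", 11))  # always contains "A", "B", "1"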
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to the range [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
| 362 |
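A worked example of the two rescalings above: min-max scaling maps the data onto [0, 1], while z-scores center it at zero with unit sample standard deviation.

data = [2.0, 4.0, 6.0, 8.0, 10.0]
print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]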
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AlbertModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" )
UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
| 298 | 0 |
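A minimal standalone sketch (assumes torch and transformers are available) of the shape contract the tester's create_and_check_model enforces, using the same tiny dimensions as the tester above:

import torch
from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(vocab_size=99, embedding_size=16, hidden_size=36,
                      num_hidden_layers=6, num_hidden_groups=6,
                      num_attention_heads=6, intermediate_size=37)
model = AlbertModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    result = model(input_ids)
assert result.last_hidden_state.shape == (13, 7, 36)
assert result.pooler_output.shape == (13, 36)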
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
snake_case__ : Union[str, Any] = 50_003
snake_case__ : Optional[int] = 50_002
@require_sentencepiece
@require_tokenizers
class snake_case_( _a , unittest.TestCase ):
__UpperCamelCase = PLBartTokenizer
__UpperCamelCase = None
__UpperCamelCase = False
def lowerCamelCase__ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[str] = PLBartTokenizer(__UpperCAmelCase , language_codes='''base''' , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = PLBartTokenizer(__UpperCAmelCase , language_codes='''base''' , keep_accents=__UpperCAmelCase )
lowerCAmelCase : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowerCAmelCase : List[str] = tokenizer.vocab_size
lowerCAmelCase : Union[str, Any] = [tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 4 , __UpperCAmelCase )]
self.assertListEqual(__UpperCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowerCAmelCase : Union[str, Any] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowerCAmelCase : Tuple = tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = PLBartTokenizer(__UpperCAmelCase , language_codes='''multi''' , keep_accents=__UpperCAmelCase )
lowerCAmelCase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowerCAmelCase : Any = tokenizer.vocab_size
lowerCAmelCase : Tuple = [tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 7 , __UpperCAmelCase )]
self.assertListEqual(
__UpperCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowerCAmelCase : Dict = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowerCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_( unittest.TestCase ):
__UpperCamelCase = """uclanlp/plbart-python-en_XX"""
__UpperCamelCase = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
__UpperCamelCase = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
__UpperCamelCase = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCamelCase__ ( cls : Optional[Any] ):
lowerCAmelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
lowerCAmelCase : Union[str, Any] = 1
return cls
def lowerCamelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0_0_0_3 )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids )
lowerCAmelCase : Tuple = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowerCAmelCase : str = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , __UpperCAmelCase )
lowerCAmelCase : List[str] = 1_0
lowerCAmelCase : Union[str, Any] = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase__ ( self : int ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase : Optional[int] = PLBartTokenizer.from_pretrained(__UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase )
@require_torch
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors='''pt''' )
lowerCAmelCase : Optional[int] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowerCAmelCase : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Tuple = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
lowerCAmelCase : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_0 , return_tensors='''pt''' )
lowerCAmelCase : Dict = targets["input_ids"]
lowerCAmelCase : str = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0_0_0_1,
} , )
| 60 |
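The PLBart tests above rely on shift_tokens_right rotating the labels so the trailing language-code token becomes the first decoder input. A pure-Python illustration of that rotation (not the torch implementation, and the ids are taken from the test's examples):

labels = [9037, 33442, 57, 2, 50003]      # ..., </s>, __en_XX__
decoder_input_ids = [labels[-1]] + labels[:-1]
print(decoder_input_ids)                  # [50003, 9037, 33442, 57, 2]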
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    # Project Euler 43 property: the 3-digit substrings d2d3d4, d3d4d5, ...,
    # d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13, 17 respectively.
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    # Sum all 0-9 pandigital numbers with the substring-divisibility property.
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f'''{solution() = }''')
| 40 | 0 |
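A worked check using Project Euler 43's own example, 1406357289 (357 is divisible by 7, 572 by 11, 728 by 13, and 289 by 17):

num = tuple(int(digit) for digit in "1406357289")
print(is_substring_divisible(num))  # True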
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 |
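An example of the multi-modal behaviour: values tied for the highest count are all returned, sorted.

print(mode([3, 1, 3, 2, 1]))  # [1, 3], both appear twice
print(mode([]))               # []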
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    # Reduce theta modulo 2*pi so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5)) | 96 | 1 |
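Because the angle is reduced modulo 2*pi before summing, 30 series terms are more than enough for double precision; the truncated series tracks the library functions closely:

from math import cos, sin

print(abs(maclaurin_sin(10) - sin(10)) < 1e-9)  # True
print(abs(maclaurin_cos(-5) - cos(-5)) < 1e-9)  # True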
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1,
                 bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 72 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 | 1 |
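A hypothetical usage sketch for the class above (the upstream class this mirrors is BartTokenizerFast; the checkpoint name is illustrative and requires network access): pre-tokenized input is only accepted when the tokenizer was built with add_prefix_space=True, exactly the guard enforced in _batch_encode_plus and _encode_plus.

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)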
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ :str = logging.get_logger(__name__)
lowerCAmelCase__ :Any = "▁"
lowerCAmelCase__ :Any = {"vocab_file": "sentencepiece.bpe.model"}
lowerCAmelCase__ :Optional[Any] = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
lowerCAmelCase__ :Any = {
"xlm-roberta-base": 5_1_2,
"xlm-roberta-large": 5_1_2,
"xlm-roberta-large-finetuned-conll02-dutch": 5_1_2,
"xlm-roberta-large-finetuned-conll02-spanish": 5_1_2,
"xlm-roberta-large-finetuned-conll03-english": 5_1_2,
"xlm-roberta-large-finetuned-conll03-german": 5_1_2,
}
class __a ( snake_case__ ):
_a : Optional[int] = VOCAB_FILES_NAMES
_a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : List[Any] = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by adding <s>...</s> (and </s></s> between a pair of sequences)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token type ids are all zeros for this model."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        """Convert a token (str) to an id, honoring the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index) -> str:
        """Convert an id to a token (str), honoring the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        """Join sentencepiece tokens and turn the ▁ marker back into spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
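# Offset arithmetic at a glance (illustration only, not part of the original file):
# SentencePiece id 3 ("," in the spm vocab) maps to tokenizer id 3 + fairseq_offset = 4,
# matching the fairseq vocab table above, while ids 0-3 are served directly from
# self.fairseq_tokens_to_ids.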
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
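# Illustrative, self-contained sketch of the lazy-import idea (hypothetical names;
# transformers' real _LazyModule is more involved):
import importlib

class _LazyNamespace:
    def __init__(self, structure):
        self._structure = structure  # {submodule name: [exported names]}

    def __getattr__(self, name):
        for submodule, exports in self._structure.items():
            if name in exports:
                # The submodule is only imported on first attribute access.
                return getattr(importlib.import_module(submodule), name)
        raise AttributeError(name)

# e.g. _LazyNamespace({"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'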
| 185 | 0 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of `sequence` via backtracking."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Branch on excluding, then including, the element at `index`."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
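    # Worked example (illustration): each element is first excluded, then included,
    # so for [1, 2] this prints [], [2], [1], [1, 2] in that order.
    generate_all_subsequences([1, 2])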
| 129 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 129 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores for the given candidates and tests."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
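# Numeric sanity check (illustration only): with n=5 samples and c=2 correct,
# pass@1 reduces to c/n, since 1 - (1 - 1/4) * (1 - 1/5) = 0.4, so
# estimate_pass_at_k(np.array([5]), np.array([2]), 1) -> array([0.4]).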
| 206 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
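    # Sanity check (illustration): both variants should agree with math.gcd.
    import math

    assert euclidean_gcd(252, 105) == euclidean_gcd_recursive(252, 105) == math.gcd(252, 105) == 21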
| 206 | 1 |
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """Merge files two at a time, always picking the two smallest, and return the total cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
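    # Worked example (illustration): merging [2, 3, 4] costs 2+3=5, then 5+4=9, so 14 total.
    assert optimal_merge_pattern([2, 3, 4]) == 14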
| 335 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library version (given by its name or as a parsed Version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed torch version to `version` using `operation` (e.g. ">=")."""
    return compare_versions(torch_version, operation, version)
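# Usage sketch (illustration): gate a code path on an installed package version, e.g.
#   is_torch_version(">=", "1.12.0")
#   compare_versions("numpy", ">=", "1.20.0")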
| 335 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of `function` closest to `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'''{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 30 | """simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 | 1 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
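# Usage sketch (illustration; `test_push` and the repo name are hypothetical):
# the `temporary_repo` fixture guarantees cleanup even if the test body raises.
#
# def test_push(temporary_repo):
#     with temporary_repo(f"{CI_HUB_USER}/my-test-repo") as repo_id:
#         ...  # exercise code against repo_id; the repo is deleted afterwards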
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True,
                 frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
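# Usage note (illustration): with these defaults a (max_length x num_mel_bins) =
# (1024 x 128) spectrogram yields floor((128 - 16) / 10) + 1 = 12 patches along
# frequency and floor((1024 - 16) / 10) + 1 = 101 along time, i.e. 12 * 101 = 1212 patches.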
| 290 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640,
                 num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1,
                 initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 353 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
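    # Example invocation (illustration only; all paths are hypothetical):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./rembert/model.ckpt \
    #       --rembert_config_file ./rembert/config.json \
    #       --pytorch_dump_path ./rembert/pytorch_model.bin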
| 145 | 0 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse an init file: return per backend the `_import_structure` objects and the TYPE_CHECKING objects."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if the two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    """Check every submodule is properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
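    # Sanity check (illustration) of the backend parser on a synthetic line:
    # unrelated lines return None, backend guards return the backend name.
    assert find_backend("    if not is_torch_available():") == "torch"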
| 2 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
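    # Example invocation (illustration only; script name and paths are hypothetical):
    #   python convert_mbart_original_checkpoint_to_pytorch.py ./model.pt ./mbart-dump --finetuned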
| 2 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """Return the spiral side length at which the prime ratio of the diagonals first drops below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
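    # Worked example (illustration): the diagonal prime ratio first drops below 1/2
    # at side length 11 (10 primes among 21 diagonal numbers gives 10/21 < 0.5).
    assert solution(0.5) == 11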
| 79 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video URL via downloadgram and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 79 | 1 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Plot the raw data against the degree-4 polynomial fit."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 41 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
        # with so little data, distributed training needs more epochs to reach a score on par with 0 or 1 GPU
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298 | 0 |
'''simple docstring'''
import cva
import numpy as np
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if k in (0.04, 0.06):
A : str = k
A : Dict = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ) -> str:
"""simple docstring"""
return str(self.k )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
A : List[Any] = cva.imread(SCREAMING_SNAKE_CASE , 0 )
        A, A : Dict = img.shape  # h, w: image height and width
A : list[list[int]] = []
A : List[str] = img.copy()
A : str = cva.cvtColor(SCREAMING_SNAKE_CASE , cva.COLOR_GRAY2RGB )
        A, A : Any = np.gradient(SCREAMING_SNAKE_CASE )  # dy, dx: intensity gradients along each image axis
A : List[str] = dx**2
A : Dict = dy**2
A : Optional[Any] = dx * dy
A : List[Any] = 0.04
A : int = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
A : Tuple = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A : Any = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A : Optional[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A : int = (wxx * wyy) - (wxy**2)
A : Optional[Any] = wxx + wyy
A : List[Any] = det - k * (trace**2)
                # threshold on the corner response r; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowercase : Optional[int] = HarrisCorner(0.04, 3)
    lowercase , lowercase : Dict = edge_detect.detect('path_to_image')  # color_img, corner_list
cva.imwrite('detect.png', color_img)
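# A numpy-only sketch of the Harris response computed by detect() above, run
# on an in-memory array so the arithmetic can be checked without reading an
# image from disk. Same windowed-sum formulation; names are illustrative.
import numpy as np

def harris_response(img, k=0.04, window_size=3):
    dy, dx = np.gradient(img.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    offset = window_size // 2
    h, w = img.shape
    response = np.zeros((h, w))
    for y in range(offset, h - offset):
        for x in range(offset, w - offset):
            wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
            wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
            wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
            det = wxx * wyy - wxy**2
            trace = wxx + wyy
            response[y, x] = det - k * trace**2
    return response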
| 357 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : int = use_input_mask
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Tuple = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = initializer_range
A : Any = use_labels
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        A, A, A, A : Any = self.prepare_config_and_inputs()
A : Tuple = True
A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = True
A : Tuple = True
A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
# first forward pass
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Optional[int] = outputs.past_key_values
        # create hypothetical next tokens (and a matching mask) to extend the inputs with
A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the new mask to the attention mask
A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Any = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A, A, A : Optional[int] = self.prepare_config_and_inputs()
A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = BertGenerationEncoderTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs()
A : str = '''bert'''
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
        A, A, A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
A : Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Any = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
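# For context, a hedged usage sketch of the two classes exercised above,
# loosely following the library documentation: encoder and decoder are tied
# together as a seq2seq model. The checkpoint name is illustrative and the
# snippet downloads weights when run.
from transformers import BertGenerationDecoder, BertGenerationEncoder, BertTokenizer, EncoderDecoderModel

encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
decoder = BertGenerationDecoder.from_pretrained(
    "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
)
bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
input_ids = tokenizer("This is a long article to summarize", add_special_tokens=False, return_tensors="pt").input_ids
labels = tokenizer("This is a short summary", return_tensors="pt").input_ids
loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss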
| 311 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[int] = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
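# A stripped-down sketch of the pattern above: defer heavy imports until an
# attribute is first touched, using a module-level __getattr__ (PEP 562).
# Meant to live in a package __init__.py; `heavy_module` and `HeavyClass`
# are placeholders.
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")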
| 127 |
from __future__ import annotations
_SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase_ ,-1 ,-1 ) ,range(UpperCamelCase_ ,-1 ,-1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase_ ,-1 ,-1 ) ,range(UpperCamelCase_ ,len(UpperCamelCase_ ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if row >= len(UpperCamelCase_ ):
solution.append(UpperCamelCase_ )
printboard(UpperCamelCase_ )
print()
return True
for i in range(len(UpperCamelCase_ ) ):
if is_safe(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
            board[row][i] = 1  # place a queen and recurse
solve(UpperCamelCase_ ,row + 1 )
            board[row][i] = 0  # backtrack
return False
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
for j in range(len(UpperCamelCase_ ) ):
if board[i][j] == 1:
print('''Q''' ,end=''' ''' )
else:
print('''.''' ,end=''' ''' )
print()
# n=int(input("The no. of queens"))
_SCREAMING_SNAKE_CASE : Tuple = 8
_SCREAMING_SNAKE_CASE : List[Any] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCamelCase , UpperCamelCase ) ) )
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
_a = (
'''Wrong input data\'s dimensions... '''
f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
)
raise ValueError(UpperCamelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_a = (
'''Wrong input data\'s shape... '''
f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
)
raise ValueError(UpperCamelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
_a = (
'''Input data have different datatype... '''
f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
)
raise TypeError(UpperCamelCase )
_a = []
for value in value_array:
_a = euclidean(UpperCamelCase , dataset[0] )
_a = dataset[0].tolist()
for dataset_value in dataset[1:]:
_a = euclidean(UpperCamelCase , UpperCamelCase )
if dist > temp_dist:
_a = temp_dist
_a = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
'''simple docstring'''
return np.dot(UpperCamelCase , UpperCamelCase ) / (norm(UpperCamelCase ) * norm(UpperCamelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
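# A vectorised sketch of the same nearest-neighbour search using numpy
# broadcasting; for Euclidean distance it returns the same [vector, distance]
# pairs as the loop above. Names are illustrative.
import numpy as np

def nearest_neighbours(dataset, value_array):
    # (num_queries, num_points) matrix of pairwise Euclidean distances
    dists = np.linalg.norm(value_array[:, None, :] - dataset[None, :, :], axis=-1)
    nearest = dists.argmin(axis=1)
    return [[dataset[i].tolist(), float(row[i])] for i, row in zip(nearest, dists)]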
| 179 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : Dict = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : str = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Any = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Any = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
_snake_case : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : List[str] = "▁"
lowerCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
lowerCamelCase : Tuple = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
lowerCamelCase : str = {
"facebook/mbart-large-50-one-to-many-mmt": 1_0_2_4,
}
# fmt: off
lowerCamelCase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A__ ( A__ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = ['input_ids', 'attention_mask']
A__ = []
A__ = []
def __init__( self : int , _a : Tuple , _a : Optional[int]=None , _a : str=None , _a : Tuple="</s>" , _a : List[str]="</s>" , _a : Any="<s>" , _a : Dict="<unk>" , _a : Optional[Any]="<pad>" , _a : Optional[int]="<mask>" , _a : Optional[Dict[str, Any]] = None , **_a : List[Any] , ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
_SCREAMING_SNAKE_CASE =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_a , tgt_lang=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_SCREAMING_SNAKE_CASE =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_SCREAMING_SNAKE_CASE ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =len(self.sp_model )
_SCREAMING_SNAKE_CASE ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a )
}
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
_SCREAMING_SNAKE_CASE =src_lang if src_lang is not None else 'en_XX'
_SCREAMING_SNAKE_CASE =self.lang_code_to_id[self._src_lang]
_SCREAMING_SNAKE_CASE =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A ( self : Dict ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A ( self : List[Any] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A ( self : Optional[Any] , _a : str ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.__dict__.copy()
_SCREAMING_SNAKE_CASE =None
return state
def __setstate__( self : Dict , _a : Dict ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self : List[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : Dict , _a : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_a , out_type=_a )
def A ( self : str , _a : str ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_SCREAMING_SNAKE_CASE =self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self : Optional[int] , _a : int ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def A ( self : List[Any] , _a : str ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =''
_SCREAMING_SNAKE_CASE =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =[]
else:
current_sub_tokens.append(_a )
_SCREAMING_SNAKE_CASE =False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def A ( self : Optional[int] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , 'wb' ) as fi:
_SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def A ( self : str , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
_SCREAMING_SNAKE_CASE =[1] * len(self.prefix_tokens )
_SCREAMING_SNAKE_CASE =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def A ( self : List[str] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A ( self : Union[str, Any] , _a : Union[str, Any] , _a : str , _a : Optional[str] , _a : Optional[str] , **_a : Optional[Any] ) -> int:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_SCREAMING_SNAKE_CASE =src_lang
_SCREAMING_SNAKE_CASE =self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
_SCREAMING_SNAKE_CASE =self.convert_tokens_to_ids(_a )
_SCREAMING_SNAKE_CASE =tgt_lang_id
return inputs
def A ( self : Optional[Any] , _a : List[str] , _a : str = "en_XX" , _a : Optional[List[str]] = None , _a : str = "ro_RO" , **_a : Optional[Any] , ) -> BatchEncoding:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =src_lang
_SCREAMING_SNAKE_CASE =tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def A ( self : int ) -> List[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A ( self : Tuple , _a : str ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.lang_code_to_id[src_lang]
_SCREAMING_SNAKE_CASE =[self.cur_lang_code_id]
_SCREAMING_SNAKE_CASE =[self.eos_token_id]
def A ( self : Optional[int] , _a : str ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.lang_code_to_id[tgt_lang]
_SCREAMING_SNAKE_CASE =[self.cur_lang_code_id]
_SCREAMING_SNAKE_CASE =[self.eos_token_id]
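# A hedged usage sketch for this tokenizer, loosely following the model card
# for mbart-large-50-one-to-many-mmt: the source language code wraps the
# input, and generation is forced to start with the target language code.
from transformers import MBart50Tokenizer, MBartForConditionalGeneration

model_name = "facebook/mbart-large-50-one-to-many-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name, src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained(model_name)
inputs = tokenizer("The head of the UN says there is no military solution in Syria", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))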
| 47 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Dict = ['image_processor']
lowerCamelCase : str = 'SamImageProcessor'
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.image_processor
__lowerCamelCase : str = -10
__lowerCamelCase : List[str] = self.image_processor.size['longest_edge']
def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding:
__lowerCamelCase : Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # pop arguments that are not used in the forward pass but are used nevertheless
__lowerCamelCase : List[Any] = encoding_image_processor['original_sizes']
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ): # Checks if Torch or TF tensor
__lowerCamelCase : Dict = original_sizes.numpy()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Tuple = self._normalize_and_convert(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , )
return encoding_image_processor
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="pt" , ) -> Optional[int]:
if input_points is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] ) for point in input_points
]
else:
__lowerCamelCase : List[Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for point, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCamelCase , __lowerCamelCase : Tuple = self._pad_points_and_labels(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_labels is not None:
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box in input_boxes
]
else:
__lowerCamelCase : Optional[int] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCamelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
__lowerCamelCase : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
__lowerCamelCase : str = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCamelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCamelCase : Dict = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Tuple = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCamelCase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCamelCase : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : List[str] = max([point.shape[0] for point in input_points] )
__lowerCamelCase : Union[str, Any] = []
for i, point in enumerate(SCREAMING_SNAKE_CASE_ ):
if point.shape[0] != expected_nb_points:
__lowerCamelCase : Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__lowerCamelCase : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = processed_input_points
return input_points, input_labels
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> np.ndarray:
__lowerCamelCase , __lowerCamelCase : Tuple = original_size
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE_ , longest_edge=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = deepcopy(SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
if is_bounding_box:
__lowerCamelCase : Optional[int] = coords.reshape(-1 , 2 , 2 )
__lowerCamelCase : List[Any] = coords[..., 0] * (new_w / old_w)
__lowerCamelCase : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCamelCase : Tuple = coords.reshape(-1 , 4 )
return coords
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> Optional[Any]:
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ): # Checks for TF or Torch tensor
__lowerCamelCase : List[str] = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Input points must be a list of list of floating points.' )
__lowerCamelCase : str = [np.array(SCREAMING_SNAKE_CASE_ ) for input_point in input_points]
else:
__lowerCamelCase : Optional[Any] = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ):
__lowerCamelCase : Union[str, Any] = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Input labels must be a list of list integers.' )
__lowerCamelCase : Any = [np.array(SCREAMING_SNAKE_CASE_ ) for label in input_labels]
else:
__lowerCamelCase : Dict = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ):
__lowerCamelCase : int = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE_ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
__lowerCamelCase : List[Any] = [np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCamelCase : Tuple = None
return input_points, input_labels, input_boxes
@property
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE_ ) )
def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
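# A numpy sketch of the coordinate rescaling `_normalize_coordinates`
# performs: points given in the original image frame are mapped into the
# resized frame whose longest edge equals `longest_edge` (how SAM-style
# preprocessing computes the target shape; names are illustrative).
import numpy as np

def rescale_points(points, original_hw, longest_edge):
    old_h, old_w = original_hw
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    scaled = points.astype(float).copy()
    scaled[..., 0] *= new_w / old_w  # x coordinates
    scaled[..., 1] *= new_h / old_h  # y coordinates
    return scaled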
| 185 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
a : Tuple = logging.get_logger(__name__)
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Union[str, Any] = question_encoder
a : str = generator
a : List[Any] = self.question_encoder
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
if os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
a : Dict = os.path.join(lowerCAmelCase__ , "question_encoder_tokenizer" )
a : List[Any] = os.path.join(lowerCAmelCase__ , "generator_tokenizer" )
self.question_encoder.save_pretrained(lowerCAmelCase__ )
self.generator.save_pretrained(lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
a : List[Any] = kwargs.pop("config" , lowerCAmelCase__ )
if config is None:
a : Optional[int] = RagConfig.from_pretrained(lowerCAmelCase__ )
a : Union[str, Any] = AutoTokenizer.from_pretrained(
lowerCAmelCase__ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
a : str = AutoTokenizer.from_pretrained(
lowerCAmelCase__ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=lowerCAmelCase__ , generator=lowerCAmelCase__ )
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
return self.current_tokenizer(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
return self.generator.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
return self.generator.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : List[str] = self.question_encoder
def __a ( self ) -> List[str]:
a : int = self.generator
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "longest" , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> BatchEncoding:
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , lowerCAmelCase__ , )
if max_length is None:
a : Optional[int] = self.current_tokenizer.model_max_length
a : List[str] = self(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , **lowerCAmelCase__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
a : Union[str, Any] = self.current_tokenizer.model_max_length
a : List[str] = self(
text_target=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , **lowerCAmelCase__ , )
        model_inputs['labels'] = labels['input_ids']
return model_inputs
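# The class above is essentially a delegation wrapper around two tokenizers;
# a minimal sketch of that pattern, stripped of the HF specifics (all names
# here are illustrative):
class PairedTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder  # encode queries by default

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def use_generator(self):
        # route target-side tokenization through the generator's tokenizer
        self.current_tokenizer = self.generator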
| 79 |
"""simple docstring"""
from itertools import product
def _SCREAMING_SNAKE_CASE ( sides_number : int , dice_number : int ) -> list[int]:
'''simple docstring'''
a : Dict = sides_number
a : List[str] = max_face_number * dice_number
a : Optional[int] = [0] * (max_total + 1)
a : Dict = 1
a : Optional[Any] = range(_lowercase , max_face_number + 1 )
for dice_numbers in product(_lowercase , repeat=_lowercase ):
a : Union[str, Any] = sum(_lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def _SCREAMING_SNAKE_CASE ( ) ->float:
'''simple docstring'''
a : str = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a : List[Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a : Optional[Any] = 0
a : Tuple = 9
a : Union[str, Any] = 4 * 9
a : Any = 6
for peter_total in range(_lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a : List[str] = (4**9) * (6**6)
a : List[Any] = peter_wins_count / total_games_number
a : Any = round(_lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
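# The frequency table above enumerates all faces**dice combinations; a sketch
# of the same distribution built by repeated convolution, which stays
# polynomial in the number of dice. Indexing matches the list above
# (index = total, value = frequency).
def totals_by_convolution(sides_number, dice_number):
    dist = [1]  # zero dice: the total 0 occurs exactly once
    for _ in range(dice_number):
        new_dist = [0] * (len(dist) + sides_number)
        for total, freq in enumerate(dist):
            for face in range(1, sides_number + 1):
                new_dist[total + face] += freq
        dist = new_dist
    return dist

assert sum(totals_by_convolution(4, 9)) == 4**9  # every combination counted once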
| 79 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :str = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case :Optional[Any] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
__snake_case :int = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
__snake_case :List[Any] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__snake_case :Any = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case :List[Any] = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 49 |
'''simple docstring'''
def UpperCamelCase_ ( A__ : list[list[float]] ):
'''simple docstring'''
lowerCAmelCase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A__ ):
if len(A__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A__ ) )
return data_lists
def UpperCamelCase_ ( A__ : list[list[float]] , A__ : list[int] ):
'''simple docstring'''
lowerCAmelCase_ : list[list[float]] = []
for dlist, weight in zip(A__ , A__ ):
lowerCAmelCase_ : Tuple = min(A__ )
lowerCAmelCase_ : str = max(A__ )
lowerCAmelCase_ : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCAmelCase_ : List[Any] = f'Invalid weight of {weight:f} provided'
raise ValueError(A__ )
score_lists.append(A__ )
return score_lists
def UpperCamelCase_ ( A__ : list[list[float]] ):
'''simple docstring'''
lowerCAmelCase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A__ ):
            final_scores[j] = final_scores[j] + ele
return final_scores
def UpperCamelCase_ ( A__ : list[list[float]] , A__ : list[int] ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = get_data(A__ )
lowerCAmelCase_ : Tuple = calculate_each_score(A__ , A__ )
lowerCAmelCase_ : Optional[int] = generate_final_scores(A__ )
# append scores to source data
for i, ele in enumerate(A__ ):
source_data[i].append(A__ )
return source_data
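# A numpy sketch of the min-max scoring above: weight 1 keeps the scaled
# value, weight 0 inverts it, and a constant column falls back to all-ones
# (weight 0) or all-zeros (weight 1), mirroring the ZeroDivisionError
# branches. Names are illustrative.
import numpy as np

def minmax_score(column, weight):
    span = column.max() - column.min()
    if span == 0:  # constant column: avoid dividing by zero
        return np.full(column.shape, 1.0 if weight == 0 else 0.0)
    scaled = (column - column.min()) / span
    return scaled if weight == 1 else 1 - scaled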
| 120 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ : List[str] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = BartphoTokenizer
__UpperCamelCase : Any = False
__UpperCamelCase : Any = True
def lowerCAmelCase__ ( self : Any ):
super().setUp()
UpperCamelCase_: Union[str, Any] = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
UpperCamelCase_: Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: Union[str, Any] = {"""unk_token""": """<unk>"""}
UpperCamelCase_: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
UpperCamelCase_: str = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Dict , **snake_case_ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
UpperCamelCase_: str = """This is a là test"""
UpperCamelCase_: str = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
UpperCamelCase_: Optional[Any] = """This is a là test"""
UpperCamelCase_: Optional[int] = """▁This ▁is ▁a ▁l à ▁t est""".split()
UpperCamelCase_: Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Dict = tokens + [tokenizer.unk_token]
UpperCamelCase_: str = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
| 371 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[int]=0 ):
UpperCamelCase_: str = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case_ ) )
UpperCamelCase_: Optional[int] = np.random.RandomState(snake_case_ )
UpperCamelCase_: Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = self.get_dummy_inputs()
UpperCamelCase_: Any = pipe(**snake_case_ ).images
UpperCamelCase_: Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: str = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = self.get_dummy_inputs()
UpperCamelCase_: Dict = pipe(**snake_case_ ).images
UpperCamelCase_: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Any = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
# warmup pass to apply optimizations
UpperCamelCase_: Union[str, Any] = pipe(**self.get_dummy_inputs() )
UpperCamelCase_: Tuple = self.get_dummy_inputs()
UpperCamelCase_: Optional[int] = pipe(**snake_case_ ).images
UpperCamelCase_: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Optional[int] = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Union[str, Any] = self.get_dummy_inputs()
UpperCamelCase_: List[Any] = pipe(**snake_case_ ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: List[str] = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = self.get_dummy_inputs()
UpperCamelCase_: Any = pipe(**snake_case_ ).images
UpperCamelCase_: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Tuple = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[Any] = self.get_dummy_inputs()
UpperCamelCase_: Optional[int] = pipe(**snake_case_ ).images
UpperCamelCase_: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Tuple = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Dict = ort.SessionOptions()
UpperCamelCase_: Optional[int] = False
return options
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCamelCase_: Union[str, Any] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCamelCase_: int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Dict = """A fantasy landscape, trending on artstation"""
UpperCamelCase_: List[str] = np.random.RandomState(0 )
UpperCamelCase_: List[str] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: List[str] = output.images
UpperCamelCase_: Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCamelCase_: Tuple = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCamelCase_: List[Any] = init_image.resize((768, 512) )
UpperCamelCase_: Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase_: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Any = """A fantasy landscape, trending on artstation"""
UpperCamelCase_: Dict = np.random.RandomState(0 )
UpperCamelCase_: Union[str, Any] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: Optional[int] = output.images
UpperCamelCase_: Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCamelCase_: Optional[int] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 223 | 0 |
def a ( snake_case__: int = 10 , snake_case__: int = 22 ):
'''simple docstring'''
lowercase_ = range(1 , snake_case__ )
lowercase_ = range(1 , snake_case__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"{solution(1_0, 2_2) = }")
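# A standalone sketch (not part of the dataset row): the same count without
# big-integer string conversion. The decimal digit count of base**power is
# int(power * log10(base)) + 1, and only bases 1..9 can ever qualify, since
# 10**p already has p + 1 digits.
import math
def count_digit_powers(max_base: int = 10, max_power: int = 22) -> int:
    return sum(
        1
        for power in range(1, max_power)
        for base in range(1, max_base)
        if int(power * math.log10(base)) + 1 == power
    )
assert count_digit_powers() == 49  # the well-known Project Euler 63 answer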
| 30 |
import itertools
import math
def a ( snake_case__: int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( ):
'''simple docstring'''
lowercase_ = 2
while True:
if is_prime(snake_case__ ):
yield num
num += 1
def a ( snake_case__: int = 10_001 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) )
if __name__ == "__main__":
print(f"{solution() = }")
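# How the islice-based indexing above works (standalone sketch, assumed
# names): slicing a generator from nth - 1 to nth and calling next() yields
# exactly its nth element without materializing the rest of the stream.
import itertools
def _naturals():
    n = 1
    while True:
        yield n
        n += 1
assert next(itertools.islice(_naturals(), 4, 5)) == 5  # the 5th element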
| 30 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Dict ):
torch.manual_seed(0 )
_a = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def UpperCamelCase__ ( self : str ):
torch.manual_seed(0 )
_a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__a )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.dummy_uncond_unet
_a = DDIMScheduler()
_a = self.dummy_vq_model
_a = LDMPipeline(unet=__a , vqvae=__a , scheduler=__a )
ldm.to(__a )
ldm.set_progress_bar_config(disable=__a )
_a = torch.manual_seed(0 )
_a = ldm(generator=__a , num_inference_steps=2 , output_type="numpy" ).images
_a = torch.manual_seed(0 )
_a = ldm(generator=__a , num_inference_steps=2 , output_type="numpy" , return_dict=__a )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_a = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : List[str] ):
_a = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(__a )
ldm.set_progress_bar_config(disable=__a )
_a = torch.manual_seed(0 )
_a = ldm(generator=__a , num_inference_steps=5 , output_type="numpy" ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_a = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
_a = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
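# The 3x3 corner-slice comparison idiom these pipeline tests rely on, shown
# in miniature on synthetic data (standalone sketch):
import numpy as np
_image = np.zeros((1, 64, 64, 3))
_image[0, -3:, -3:, -1] = 0.5
_image_slice = _image[0, -3:, -3:, -1]  # bottom-right 3x3 of the last channel
_expected_slice = np.full(9, 0.5)
assert np.abs(_image_slice.flatten() - _expected_slice).max() < 1e-2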
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42 # [batch_size x 3]
__a =42
__a =42
__a =42
__a =42
__a =42
def UpperCamelCase__ ( self : str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self : List[str] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(__a , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase__ ( self : List[Any] ):
_a , *_a = self.shape
_a = int(np.prod(__a ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(__a )
_a = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase__ ( self : Dict , __a : torch.Tensor ):
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(__a , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(__a , -1 , 2 )
_a = (
self.z.view(__a , 1 , 3 )
+ self.x.view(__a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=__a )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__a , *__a , 2 , 3 )
def UpperCamelCase__ ( self : Dict , __a : int , __a : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase ( lowercase : int ) -> DifferentiableProjectiveCamera:
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_a = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
_a = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
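# The pixel-coordinate -> camera-plane mapping inside get_camera_rays above,
# factored into a standalone sketch (illustrative names, not the class API):
import torch
def _pixel_to_camera_plane(coords, width, height, x_fov, y_fov):
    res = torch.tensor([width, height], dtype=torch.float32)
    fov = torch.tensor([x_fov, y_fov])
    fracs = (coords.float() / (res - 1)) * 2 - 1  # normalize pixels to [-1, 1]
    return fracs * torch.tan(fov / 2)  # scale by the tangent of half the FOV
print(_pixel_to_camera_plane(torch.tensor([[0.0, 0.0], [63.0, 63.0]]), 64, 64, 0.7, 0.7))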
| 346 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : Union[str, Any] = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
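# The try/except-per-backend blocks above feed transformers' _LazyModule,
# which defers the heavy torch/tf imports until an attribute is first
# accessed. The minimal modern equivalent is a module-level __getattr__
# (PEP 562); a hedged sketch, not transformers' actual implementation:
#
#     # mypkg/__init__.py
#     def __getattr__(name):
#         if name == "XLMTokenizer":
#             from .tokenization_xlm import XLMTokenizer
#             return XLMTokenizer
#         raise AttributeError(name)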
| 55 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = StableUnCLIPImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : List[Any] = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
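# The peak-memory bookkeeping pattern used by the test above, factored out
# (sketch; assumes a CUDA device and some callable workload `work`):
import torch
def _peak_cuda_memory_bytes(work):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    work()
    return torch.cuda.max_memory_allocated()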
| 261 | 0 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowercase__ : Dict = NewType("""DataClass""", Any)
lowercase__ : int = NewType("""DataClassType""", Any)
def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> int:
"""simple docstring"""
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def UpperCamelCase_ ( lowerCAmelCase__ : list ) -> Callable[[str], Any]:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = {str(__lowerCamelCase ): choice for choice in choices}
return lambda lowerCAmelCase__ : str_to_choice.get(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase_ ( *,
lowerCAmelCase__ : Union[str, List[str]] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : Any = dataclasses.MISSING , lowerCAmelCase__ : Callable[[], Any] = dataclasses.MISSING , lowerCAmelCase__ : dict = None , **lowerCAmelCase__ : str , ) -> dataclasses.Field:
"""simple docstring"""
if metadata is None:
        # Important: don't use a mutable dict as the default param in the function signature, because it is created once and shared across function calls (illustrated just after this helper)
lowerCAmelCase_ : List[Any] = {}
if aliases is not None:
lowerCAmelCase_ : Union[str, Any] = aliases
if help is not None:
lowerCAmelCase_ : Union[str, Any] = help
return dataclasses.field(metadata=__lowerCamelCase , default=__lowerCamelCase , default_factory=__lowerCamelCase , **__lowerCamelCase )
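# Why the `metadata is None` guard in the helper above matters: a mutable
# default argument is created once at definition time and shared across
# calls. A standalone illustration (not part of the dataset row):
def _bad(metadata={}):  # one dict object reused by every call
    metadata["n"] = metadata.get("n", 0) + 1
    return metadata["n"]
assert _bad() == 1
assert _bad() == 2  # state leaked from the first call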
class UpperCamelCase__ ( A_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[DataClassType, Iterable[DataClassType]] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCAmelCase_ : Union[str, Any] = ArgumentDefaultsHelpFormatter
super().__init__(**_lowerCamelCase )
if dataclasses.is_dataclass(_lowerCamelCase ):
lowerCAmelCase_ : Tuple = [dataclass_types]
lowerCAmelCase_ : int = list(_lowerCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_lowerCamelCase )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ : ArgumentParser , SCREAMING_SNAKE_CASE_ : dataclasses.Field ):
lowerCAmelCase_ : int = F"--{field.name}"
lowerCAmelCase_ : Optional[int] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _lowerCamelCase ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop('aliases' , [] )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ : Dict = [aliases]
lowerCAmelCase_ : List[str] = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(_lowerCamelCase , 'UnionType' ) and isinstance(_lowerCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_lowerCamelCase ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F" Problem encountered in field \'{field.name}\'." )
if type(_lowerCamelCase ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase_ : Optional[int] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase_ : Dict = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase_ : Dict = (
field.type.__args__[0] if isinstance(_lowerCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase_ : List[Any] = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase_ : List[Any] = {}
if origin_type is Literal or (isinstance(field.type , _lowerCamelCase ) and issubclass(field.type , _lowerCamelCase )):
if origin_type is Literal:
lowerCAmelCase_ : Tuple = field.type.__args__
else:
lowerCAmelCase_ : Dict = [x.value for x in field.type]
lowerCAmelCase_ : Any = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : Dict = field.default
else:
lowerCAmelCase_ : List[Any] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase_ : Tuple = copy(_lowerCamelCase )
            # Hack because type=bool in argparse does not behave as we want (see the sketch after this method)
lowerCAmelCase_ : Tuple = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase_ : Optional[int] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase_ : Optional[int] = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase_ : str = '?'
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase_ : Optional[int] = True
elif isclass(_lowerCamelCase ) and issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ : List[str] = field.type.__args__[0]
lowerCAmelCase_ : Any = '+'
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase_ : Union[str, Any] = True
else:
lowerCAmelCase_ : Optional[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : Tuple = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : List[str] = field.default_factory()
else:
lowerCAmelCase_ : List[str] = True
parser.add_argument(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase_ : Union[str, Any] = False
parser.add_argument(F"--no_{field.name}" , action='store_false' , dest=field.name , **_lowerCamelCase )
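    # Why the string_to_bool converter above exists (illustrative note):
    # argparse's `type=bool` simply calls bool() on the raw string, and any
    # non-empty string -- including "False" -- is truthy:
    #     parser = argparse.ArgumentParser()
    #     parser.add_argument("--flag", type=bool)
    #     parser.parse_args(["--flag", "False"]).flag  # -> True (!)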
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : DataClassType ):
if hasattr(_lowerCamelCase , '_argument_group_name' ):
lowerCAmelCase_ : str = self.add_argument_group(dtype._argument_group_name )
else:
lowerCAmelCase_ : Optional[int] = self
try:
lowerCAmelCase_ : Dict = get_type_hints(_lowerCamelCase )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(_lowerCamelCase ):
lowerCAmelCase_ : Any = '.'.join(map(_lowerCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(_lowerCamelCase ):
if not field.init:
continue
lowerCAmelCase_ : Tuple = type_hints[field.name]
self._parse_dataclass_field(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Tuple=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCAmelCase_ : Optional[Any] = []
if args_filename:
args_files.append(Path(_lowerCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCAmelCase_ : Any = ArgumentParser()
args_file_parser.add_argument(_lowerCamelCase , type=_lowerCamelCase , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = args_file_parser.parse_known_args(args=_lowerCamelCase )
lowerCAmelCase_ : str = vars(_lowerCamelCase ).get(args_file_flag.lstrip('-' ) , _lowerCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(_lowerCamelCase ) for p in cmd_args_file_paths] )
lowerCAmelCase_ : Dict = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCAmelCase_ : Any = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCAmelCase_ ,lowerCAmelCase_ : str = self.parse_known_args(args=_lowerCamelCase )
lowerCAmelCase_ : Any = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : Any = {f.name for f in dataclasses.fields(_lowerCamelCase ) if f.init}
lowerCAmelCase_ : int = {k: v for k, v in vars(_lowerCamelCase ).items() if k in keys}
for k in keys:
delattr(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ : str = dtype(**_lowerCamelCase )
outputs.append(_lowerCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_lowerCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Dict[str, Any] , SCREAMING_SNAKE_CASE_ : bool = False ):
lowerCAmelCase_ : Union[str, Any] = set(args.keys() )
lowerCAmelCase_ : Optional[int] = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : Optional[Any] = {f.name for f in dataclasses.fields(_lowerCamelCase ) if f.init}
lowerCAmelCase_ : int = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCAmelCase_ : Optional[Any] = dtype(**_lowerCamelCase )
outputs.append(_lowerCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(_lowerCamelCase )}" )
return tuple(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
with open(Path(_lowerCamelCase ) , encoding='utf-8' ) as open_json_file:
lowerCAmelCase_ : Tuple = json.loads(open_json_file.read() )
lowerCAmelCase_ : List[Any] = self.parse_dict(_lowerCamelCase , allow_extra_keys=_lowerCamelCase )
return tuple(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
lowerCAmelCase_ : Dict = self.parse_dict(yaml.safe_load(Path(_lowerCamelCase ).read_text() ) , allow_extra_keys=_lowerCamelCase )
return tuple(_lowerCamelCase )
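# Hedged usage sketch for the class above: the identifiers in this dataset
# row are mangled, so the sketch targets the upstream class it mirrors,
# transformers.HfArgumentParser (requires the transformers package):
from dataclasses import dataclass
from transformers import HfArgumentParser
@dataclass
class _DemoArgs:
    lr: float = 1e-3
(_demo_args,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(args=["--lr", "0.01"])
assert _demo_args.lr == 0.01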
| 368 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : int = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> tuple[int, int]:
"""simple docstring"""
lowerCAmelCase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCAmelCase_ : int = x_den * y_den * z_den
lowerCAmelCase_ : int = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def UpperCamelCase_ ( lowerCAmelCase__ : int = 35 ) -> int:
"""simple docstring"""
lowerCAmelCase_ : set = set()
lowerCAmelCase_ : int
lowerCAmelCase_ : Fraction = Fraction(0 )
lowerCAmelCase_ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowerCAmelCase_ : str = x_num * y_den + x_den * y_num
lowerCAmelCase_ : int = x_den * y_den
lowerCAmelCase_ : int = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : List[str] = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
lowerCAmelCase_ : Optional[int] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCAmelCase_ : Dict = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : List[str] = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : Union[str, Any] = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Union[str, Any] = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
lowerCAmelCase_ : Dict = x_num * y_num
lowerCAmelCase_ : Optional[int] = x_den * y_num + x_num * y_den
lowerCAmelCase_ : Any = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Tuple = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
lowerCAmelCase_ : List[str] = x_num * x_num * y_num * y_num
lowerCAmelCase_ : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
lowerCAmelCase_ : Tuple = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : Optional[Any] = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : Optional[int] = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Any = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
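# The reduction step used by add_three above, in isolation (standalone
# sketch): dividing numerator and denominator by their gcd yields the
# fraction in lowest terms.
from math import gcd
def _reduce(num, den):
    g = gcd(num, den)
    return num // g, den // g
assert _reduce(6, 8) == (3, 4)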
| 289 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd (see https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd)
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase_ = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
'''simple docstring'''
for attribute in key.split("." ):
_A = getattr(__lowercase , __lowercase )
if weight_type is not None:
_A = getattr(__lowercase , __lowercase ).shape
else:
_A = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
else:
_A = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __lowercase ( __lowercase , __lowercase ) -> Any:
'''simple docstring'''
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
__lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == "group" , )
_A = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_A = True
if "*" in mapped_key:
_A = name.split(__lowercase )[0].split("." )[-2]
_A = mapped_key.replace("*" , __lowercase )
if "weight_g" in name:
_A = "weight_g"
elif "weight_v" in name:
_A = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
_A = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_A = "weight"
else:
_A = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
'''simple docstring'''
_A = full_name.split("conv_layers." )[-1]
_A = name.split("." )
_A = int(items[0] )
_A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowercase )
@torch.no_grad()
def __lowercase ( __lowercase , __lowercase , __lowercase=None ) -> Union[str, Any]:
'''simple docstring'''
_A = torch.load(__lowercase )
_A = WavLMConfigOrig(checkpoint["cfg"] )
_A = WavLMOrig(__lowercase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
_A = WavLMConfig.from_pretrained(__lowercase )
else:
_A = WavLMConfig()
_A = WavLMModel(__lowercase )
recursively_load_weights(__lowercase , __lowercase )
hf_wavlm.save_pretrained(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase_ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
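# The "*"-expansion in the fairseq-to-HF key mapping above, reduced to a
# standalone sketch (illustrative names; the real code also strips
# "w2v_model." prefixes and handles conv layers separately):
_DEMO_MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
def _rename(name):
    for key, mapped in _DEMO_MAPPING.items():
        if key in name:
            layer_index = name.split(".")[2]  # "encoder.layers.<i>. ..."
            return mapped.replace("*", layer_index)
    return name
assert _rename("encoder.layers.3.self_attn.k_proj") == "encoder.layers.3.attention.k_proj"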
| 79 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = old_name
if "patch_embed" in old_name:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''')
if layer == "0":
__lowerCAmelCase = old_name.replace('''0''', '''convolution1''')
elif layer == "1":
__lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''')
elif layer == "3":
__lowerCAmelCase = old_name.replace('''3''', '''convolution2''')
else:
__lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''')
if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase):
__lowerCAmelCase = r'''\b\d{2}\b'''
if bool(re.search(lowerCamelCase, lowerCamelCase)):
__lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group()
else:
__lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group()
if int(match[0]) < 6:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
__lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1])
__lowerCAmelCase = '''intermediate_stages.''' + trimmed_name
else:
__lowerCAmelCase = old_name.replace(lowerCamelCase, '''''')
if int(match[2]) < num_meta4D_last_stage:
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2])
else:
__lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage)
__lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index)
if "norm1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''')
elif "norm2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''')
elif "fc1" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''')
elif "fc2" in old_name:
__lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''')
__lowerCAmelCase = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase):
__lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''')
if "fc" in new_name:
__lowerCAmelCase = new_name.replace('''fc''', '''convolution''')
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''')
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''')
if "proj" in new_name:
__lowerCAmelCase = new_name.replace('''proj''', '''projection''')
if "dist_head" in new_name:
__lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''')
elif "head" in new_name:
__lowerCAmelCase = new_name.replace('''head''', '''classifier''')
elif "patch_embed" in new_name:
__lowerCAmelCase = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__lowerCAmelCase = new_name.replace('''norm''', '''layernorm''')
__lowerCAmelCase = '''efficientformer.''' + new_name
else:
__lowerCAmelCase = '''efficientformer.encoder.''' + new_name
return new_name
def __magic_name__( lowerCamelCase, lowerCamelCase):
for key in checkpoint.copy().keys():
__lowerCAmelCase = checkpoint.pop(lowerCamelCase)
__lowerCAmelCase = val
return checkpoint
def __magic_name__( ):
__lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw)
return image
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model''']
__lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase)
__lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase)
__lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1])
__lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1
__lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase)
model.load_state_dict(lowerCamelCase)
model.eval()
__lowerCAmelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = 2_5_6
__lowerCAmelCase = 2_2_4
__lowerCAmelCase = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
__lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values
# original processing pipeline
__lowerCAmelCase = Compose(
[
Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']),
CenterCrop(lowerCamelCase),
ToTensor(),
Normalize(lowerCamelCase, lowerCamelCase),
])
__lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0)
assert torch.allclose(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = (1, 1_0_0_0)
if "l1" in model_name:
__lowerCAmelCase = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28])
assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
assert logits.shape == expected_shape
elif "l3" in model_name:
__lowerCAmelCase = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27])
assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3)
assert logits.shape == expected_shape
elif "l7" in model_name:
__lowerCAmelCase = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78])
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")
# Save Checkpoints
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
processor.save_pretrained(lowerCamelCase)
    print(F"""Processor successfully saved at {pytorch_dump_path}""")
if push_to_hub:
print('''Pushing model to the hub...''')
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
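# The checkpoint-rewrite loop inside convert_torch_checkpoint above, in
# miniature (standalone sketch): pop each old key and re-store its tensor
# under the converted name.
def _rename_state_dict(state_dict, rename_fn):
    for key in list(state_dict):
        state_dict[rename_fn(key)] = state_dict.pop(key)
    return state_dict
assert _rename_state_dict({"fc.weight": 1}, lambda k: k.replace("fc", "linear")) == {"linear.weight": 1}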
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __magic_name__( ):
__lowerCAmelCase = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)]
__lowerCAmelCase = randint(-5_0_0_0, 5_0_0_0)
return (arr, r)
_UpperCAmelCase : Dict = make_dataset()
def __magic_name__( lowerCamelCase, lowerCamelCase):
for triplet in permutations(lowerCamelCase, 3):
if sum(lowerCamelCase) == target:
return tuple(sorted(lowerCamelCase))
return (0, 0, 0)
def __magic_name__( lowerCamelCase, lowerCamelCase):
arr.sort()
__lowerCAmelCase = len(lowerCamelCase)
for i in range(n - 1):
__lowerCAmelCase , __lowerCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __magic_name__( ):
__lowerCAmelCase = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
__lowerCAmelCase = '''
triplet_sum1(*dataset)
'''
__lowerCAmelCase = '''
triplet_sum2(*dataset)
'''
__lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
__lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
return (min(lowerCamelCase), min(lowerCamelCase))
if __name__ == "__main__":
from doctest import testmod
testmod()
_UpperCAmelCase : Union[str, Any] = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
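# The two-pointer scan inside triplet_sum2 above, reduced to its pair-sum
# core (standalone sketch): on a sorted list, move the left pointer up when
# the sum is too small and the right pointer down when it is too large.
def _pair_sum(sorted_arr, target):
    left, right = 0, len(sorted_arr) - 1
    while left < right:
        current = sorted_arr[left] + sorted_arr[right]
        if current == target:
            return sorted_arr[left], sorted_arr[right]
        if current < target:
            left += 1
        else:
            right -= 1
    return None
assert _pair_sum([1, 2, 4, 7, 11], 9) == (2, 7)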
| 9 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = 0.9 , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None:
super().__init__(**UpperCamelCase_ )
__lowercase : str = size if size is not None else {'''shortest_edge''': 2_24}
__lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : Optional[int] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowercase : str = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
__lowercase : List[Any] = do_resize
__lowercase : List[Any] = size
__lowercase : Optional[Any] = crop_pct
__lowercase : Tuple = resample
__lowercase : str = do_center_crop
__lowercase : Optional[Any] = crop_size
__lowercase : Union[str, Any] = do_rescale
__lowercase : List[Any] = rescale_factor
__lowercase : List[str] = do_normalize
__lowercase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowercase : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
__lowercase : Any = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__lowercase : Any = int(size['''height'''] / crop_pct )
else:
__lowercase : int = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(UpperCamelCase_ ) )
__lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ )
else:
if "shortest_edge" in size:
__lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
__lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(UpperCamelCase_ ) )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : Any = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Dict:
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> PIL.Image.Image:
__lowercase : Any = do_resize if do_resize is not None else self.do_resize
__lowercase : List[Any] = crop_pct if crop_pct is not None else self.crop_pct
__lowercase : int = resample if resample is not None else self.resample
__lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : List[str] = image_mean if image_mean is not None else self.image_mean
__lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
__lowercase : Union[str, Any] = size if size is not None else self.size
__lowercase : List[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
__lowercase : Any = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
__lowercase : int = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase : str = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__lowercase : Union[str, Any] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , crop_pct=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
__lowercase : List[str] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
__lowercase : Optional[int] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__lowercase : Any = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__lowercase : List[str] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowercase : int = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
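# A minimal sketch, assuming the default values above, of the crop_pct eval
# protocol implemented by the resize step: the image is first resized so its
# short side equals shortest_edge / crop_pct, then center-cropped back to
# shortest_edge, so the crop keeps roughly crop_pct of the resized image.
crop_size = 224                          # shortest edge kept after the crop
crop_pct = 0.9                           # fraction of the resized image kept
resize_edge = int(crop_size / crop_pct)  # 224 / 0.9 -> 248
print(resize_edge)                       # resize short side to 248, then take a 224x224 center crop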
| 249 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : str = [1]
__lowercase ,__lowercase ,__lowercase : List[str] = 0, 0, 0
__lowercase : List[str] = ugly_nums[ia] * 2
__lowercase : Any = ugly_nums[ia] * 3
__lowercase : str = ugly_nums[ia] * 5
for _ in range(1 , __UpperCamelCase ):
__lowercase : Union[str, Any] = min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ugly_nums.append(__UpperCamelCase )
if next_num == next_a:
ia += 1
__lowercase : List[str] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__lowercase : int = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__lowercase : Optional[int] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_0_0) = }")
| 249 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
UpperCAmelCase_ : str = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
UpperCAmelCase_ : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase :
lowerCAmelCase : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
lowerCAmelCase : Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """A folder containing the training data."""} )
lowerCAmelCase : Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """A folder containing the validation data."""} )
lowerCAmelCase : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowerCAmelCase : int = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} )
lowerCAmelCase : float = field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __A ( self ):
A__ = {}
if self.train_dir is not None:
A__ = self.train_dir
if self.validation_dir is not None:
A__ = self.validation_dir
A__ = data_files if data_files else None
@dataclass
class UpperCamelCase :
lowerCAmelCase : str = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCAmelCase )} , )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowerCAmelCase : Optional[str] = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
lowerCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCAmelCase : str = field(default=_UpperCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
lowerCAmelCase : Optional[int] = field(
default=_UpperCAmelCase , metadata={"""help""": """Stride to use for the encoder."""} , )
class UpperCamelCase :
def __init__( self , UpperCAmelCase__=192 , UpperCAmelCase__=32 , UpperCAmelCase__=4 , UpperCAmelCase__=0.6 ):
A__ = input_size
A__ = mask_patch_size
A__ = model_patch_size
A__ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
A__ = self.input_size // self.mask_patch_size
A__ = self.mask_patch_size // self.model_patch_size
A__ = self.rand_size**2
A__ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
A__ = np.random.permutation(self.token_count )[: self.mask_count]
A__ = np.zeros(self.token_count , dtype=UpperCAmelCase__ )
A__ = 1
A__ = mask.reshape((self.rand_size, self.rand_size) )
A__ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
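import numpy as np

# Standalone check of the SimMIM-style mask geometry implemented by the class
# above (referenced later in the script as MaskGenerator), using its defaults.
input_size, mask_patch, model_patch, ratio = 192, 32, 4, 0.6
rand_size = input_size // mask_patch            # 6x6 grid of maskable blocks
scale = mask_patch // model_patch               # each block spans 8x8 model patches
token_count = rand_size**2                      # 36 candidate blocks
mask_count = int(np.ceil(token_count * ratio))  # 22 blocks get masked

idx = np.random.permutation(token_count)[:mask_count]
demo_mask = np.zeros(token_count, dtype=int)
demo_mask[idx] = 1
demo_mask = demo_mask.reshape(rand_size, rand_size).repeat(scale, 0).repeat(scale, 1)
print(demo_mask.shape, demo_mask.sum())         # (48, 48), 22 * 64 = 1408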
def UpperCamelCase ( _A : Tuple )-> Optional[Any]:
"""simple docstring"""
A__ = torch.stack([example["pixel_values"] for example in examples] )
A__ = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCamelCase ( )-> Tuple:
"""simple docstring"""
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , _A , _A )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(_A )
transformers.utils.logging.set_verbosity(_A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
A__ = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _A ) and data_args.train_val_split > 0.0:
A__ = ds["train"].train_test_split(data_args.train_val_split )
A__ = split["train"]
A__ = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
A__ = AutoConfig.from_pretrained(model_args.config_name_or_path , **_A )
elif model_args.model_name_or_path:
A__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **_A )
else:
A__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_A , "decoder_type" ):
A__ = "simmim"
# adapt config
A__ = model_args.image_size if model_args.image_size is not None else config.image_size
A__ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
A__ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
A__ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_A )
elif model_args.model_name_or_path:
A__ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_A )
else:
A__ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
A__ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
A__ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
A__ = AutoModelForMaskedImageModeling.from_config(_A )
if training_args.do_train:
A__ = ds["train"].column_names
else:
A__ = ds["validation"].column_names
if data_args.image_column_name is not None:
A__ = data_args.image_column_name
elif "image" in column_names:
A__ = "image"
elif "img" in column_names:
A__ = "img"
else:
A__ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
A__ = Compose(
[
            Lambda(lambda img: img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
A__ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_A : Tuple ):
A__ = [transforms(_A ) for image in examples[image_column_name]]
A__ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
A__ = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_A )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
A__ = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_A )
# Initialize our trainer
A__ = Trainer(
model=_A , args=_A , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=_A , data_collator=_A , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=_A )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A__ = trainer.evaluate()
trainer.log_metrics("eval" , _A )
trainer.save_metrics("eval" , _A )
# Write model card and (optionally) push to hub
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_A )
else:
trainer.create_model_card(**_A )
if __name__ == "__main__":
main()
| 198 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCamelCase ( _A : Tuple )-> Dict:
"""simple docstring"""
A__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_A , _A )
def UpperCamelCase ( _A : int )-> Optional[Any]:
"""simple docstring"""
A__ , A__ = emb.weight.shape
A__ = nn.Linear(_A , _A , bias=_A )
A__ = emb.weight.data
return lin_layer
def UpperCamelCase ( _A : str , _A : Optional[Any]=None )-> str:
"""simple docstring"""
A__ = {}
for old_key in state_dict.keys():
A__ = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
A__ = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
else:
A__ = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
A__ = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
A__ = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
A__ = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
A__ = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
A__ = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
A__ = key.replace("final_layer_norm" , "ff_layer_norm" )
A__ = state_dict[old_key]
return new_dict
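# Standalone illustration of the renaming rules above (keys are made up but
# follow the fairseq layout). Note that expert fc1/fc2 weights keep their
# names; only non-expert feed-forward keys gain the ffn. prefix.
expert_key = "layers.0.moe_layer.experts.0.fc1.weight"
print(expert_key.replace("moe_layer.experts.0", "ffn.experts.expert_3"))
# -> layers.0.ffn.experts.expert_3.fc1.weight

dense_key = "layers.1.fc1.weight"  # non-expert feed-forward layer
print(dense_key.replace(".fc1.", ".ffn.fc1."))
# -> layers.1.ffn.fc1.weight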
def UpperCamelCase ( _A : Tuple , _A : Tuple , _A : int , _A : str , _A : str = WEIGHTS_NAME )-> List[str]:
"""simple docstring"""
A__ = []
A__ = 0
os.makedirs(_A , exist_ok=_A )
for expert in range(_A ):
A__ = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(_A ):
A__ = torch.load(_A )["model"]
remove_ignore_keys_(_A )
A__ = rename_fairseq_keys(_A , _A )
A__ = os.path.join(
_A , weights_name.replace(".bin" , f"""-{len(_A )+1:05d}-of-???.bin""" ) )
torch.save(_A , _A )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_A )[0]].dtype )
# Add the last block
A__ = os.path.join(_A , weights_name.replace(".bin" , f"""-{len(_A )+1:05d}-of-???.bin""" ) )
A__ = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_A )
A__ = rename_fairseq_keys(_A , _A )
A__ = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_A ) == 1:
A__ = os.path.join(_A , _A )
torch.save(_A , _A )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_A , _A )
# Otherwise, let's build the index
A__ = {}
for idx, shard in enumerate(_A ):
A__ = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(_A ):05d}.bin""" )
A__ = os.path.join(_A , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_A , os.path.join(_A , _A ) )
for key in shard:
A__ = shard_file
# Add the metadata
A__ = {"total_size": total_size}
A__ = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_A , _A ) , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
return metadata, index
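# Illustrative shape of the index built above, following the standard sharded
# checkpoint layout (file names and sizes below are made up):
_example_index = {
    "metadata": {"total_size": 123_456_789},  # bytes across all shards
    "weight_map": {
        "decoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
    },
}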
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCAmelCase_ : Any = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCAmelCase_ : Tuple = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 198 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowercase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowercase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowercase : set[int] = {ord(char) for char in VALID_CHARS}
lowercase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str | None:
_snake_case = ""
_snake_case = 42
_snake_case = 42
_snake_case = 42
for keychar, cipherchar in zip(cycle(__A ) , __A ):
_snake_case = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__A )
return decoded
def SCREAMING_SNAKE_CASE__ ( __A ) -> list[str]:
_snake_case = []
for key in product(__A , repeat=3 ):
_snake_case = try_key(__A , __A )
if encoded is not None:
possibles.append(__A )
return possibles
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( __A = "p059_cipher.txt" ) -> int:
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = Path(__A ).parent.joinpath(__A ).read_text(encoding='utf-8' )
_snake_case = [int(__A ) for number in data.strip().split(',' )]
_snake_case = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
_snake_case = filter_common_word(__A , __A )
if len(__A ) == 1:
break
_snake_case = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
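    # A quick round-trip sanity check of the XOR scheme the solution above
    # inverts: encrypting with a repeating three-letter key and decrypting
    # with the same key is the identity.
    from itertools import cycle as _cycle

    _plain = "the quick brown fox"
    _key = [ord(c) for c in "abc"]
    _cipher = [ord(p) ^ k for p, k in zip(_plain, _cycle(_key))]
    _decoded = "".join(chr(c ^ k) for c, k in zip(_cipher, _cycle(_key)))
    assert _decoded == _plain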
| 42 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( _snake_case : float , _snake_case : int = 18 , _snake_case : int = 10 ) ->float:
"""simple docstring"""
__snake_case : Any = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__snake_case : int = radians(_snake_case )
__snake_case : str = angle_in_radians
__snake_case : Optional[int] = 3
__snake_case : List[Any] = -1
for _ in range(_snake_case ):
result += (b * (angle_in_radians**a)) / factorial(_snake_case )
__snake_case : int = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_snake_case , _snake_case )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 102 | 0 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
lowercase_ = logging.getLogger(__name__)
def lowercase ( lowerCAmelCase__ : str ) -> Optional[int]:
__a = git.Repo(search_parent_directories=lowerCAmelCase__ )
__a = {
'''repo_id''': str(lowerCAmelCase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(lowerCAmelCase__ , '''git_log.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=4 )
def lowercase ( lowerCAmelCase__ : int ) -> List[str]:
if params.n_gpu <= 0:
__a = 0
__a = -1
__a = True
__a = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
__a = int(os.environ['''WORLD_SIZE'''] )
__a = int(os.environ['''N_GPU_NODE'''] )
__a = int(os.environ['''RANK'''] )
# number of nodes / node ID
__a = params.world_size // params.n_gpu_per_node
__a = params.global_rank // params.n_gpu_per_node
__a = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
__a = 1
__a = 0
__a = 0
__a = 0
__a = 1
__a = 1
__a = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__a = params.node_id == 0 and params.local_rank == 0
__a = params.n_nodes > 1
# summary
__a = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
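import os as _os

# Environment contract assumed by the multi-GPU branch above; the job
# launcher or a wrapper script must export these (variable names taken from
# the code, values are site-specific). A probe, for illustration only:
for _name in ["WORLD_SIZE", "N_GPU_NODE", "RANK", "N_NODES", "NODE_RANK"]:
    print(_name, _os.environ.get(_name, "<unset>"))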
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
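# Minimal standalone sketch of the optional-backend guard used above,
# substituting importlib.util.find_spec for the library's
# is_torch_available()-style helpers:
import importlib.util

_sketch_structure = {"configuration": ["Config"]}
if importlib.util.find_spec("torch") is not None:
    _sketch_structure["modeling"] = ["Model"]
print(_sketch_structure)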
| 11 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowerCamelCase_ = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowerCamelCase_ = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
lowerCamelCase_ = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , ):
'''simple docstring'''
_A = len(references[0] )
if any(len(__UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
_A = [[refs[i] for refs in references] for i in range(__UpperCAmelCase )]
_A = TER(
normalized=__UpperCAmelCase , no_punct=__UpperCAmelCase , asian_support=__UpperCAmelCase , case_sensitive=__UpperCAmelCase , )
_A = sb_ter.corpus_score(__UpperCAmelCase , __UpperCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 79 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = CycleDiffusionPipeline
snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
        _A = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_A = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A = CLIPTextModel(__UpperCAmelCase )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ):
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_A = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith("mps" ):
_A = torch.manual_seed(__UpperCAmelCase )
else:
_A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_A = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = CycleDiffusionPipeline(**__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = pipe(**__UpperCAmelCase )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.get_dummy_components()
for name, module in components.items():
if hasattr(__UpperCAmelCase , "half" ):
_A = module.half()
_A = CycleDiffusionPipeline(**__UpperCAmelCase )
_A = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_A = self.get_dummy_inputs(__UpperCAmelCase )
_A = pipe(**__UpperCAmelCase )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
_A = init_image.resize((512, 512) )
_A = "CompVis/stable-diffusion-v1-4"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(
            __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.float16 , revision="fp16" )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A black colored car"
_A = "A blue colored car"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
_A = init_image.resize((512, 512) )
_A = "CompVis/stable-diffusion-v1-4"
_A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_A = "A black colored car"
_A = "A blue colored car"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , )
_A = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 79 | 1 |
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : Any = size
UpperCamelCase : str = [0] * size
UpperCamelCase : Any = [0] * size
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
return index | (index + 1)
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
return (index & (index + 1)) - 1
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[str] = value
while index < self.size:
UpperCamelCase : Dict = self.get_prev(SCREAMING_SNAKE_CASE_ ) + 1
if current_left_border == index:
UpperCamelCase : Optional[Any] = value
else:
UpperCamelCase : int = max(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.get_next(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
right -= 1 # Because of right is exclusive
UpperCamelCase : Union[str, Any] = 0
while left <= right:
UpperCamelCase : List[Any] = self.get_prev(SCREAMING_SNAKE_CASE_ )
if left <= current_left:
UpperCamelCase : Any = max(SCREAMING_SNAKE_CASE_, self.tree[right] )
UpperCamelCase : Union[str, Any] = current_left
else:
UpperCamelCase : Dict = max(SCREAMING_SNAKE_CASE_, self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
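    # A readable rendering of the heavily name-mangled record above: a
    # Fenwick-style tree for range-maximum queries. As in the original, point
    # updates are only safe when values never decrease (the tree keeps a
    # running max and is never lowered).
    class MaxFenwickTree:
        def __init__(self, size: int) -> None:
            self.size = size
            self.arr = [0] * size   # raw values
            self.tree = [0] * size  # tree[i] = max(arr[get_prev(i) + 1 : i + 1])

        @staticmethod
        def get_next(index: int) -> int:
            return index | (index + 1)

        @staticmethod
        def get_prev(index: int) -> int:
            return (index & (index + 1)) - 1

        def update(self, index: int, value: int) -> None:
            self.arr[index] = value
            while index < self.size:
                left = self.get_prev(index) + 1
                self.tree[index] = value if left == index else max(self.tree[index], value)
                index = self.get_next(index)

        def query(self, left: int, right: int) -> int:  # max over [left, right)
            right -= 1
            result = 0
            while left <= right:
                prev = self.get_prev(right)
                if left <= prev:
                    result = max(result, self.tree[right])
                    right = prev
                else:
                    result = max(result, self.arr[right])
                    right -= 1
            return result

    fenwick = MaxFenwickTree(8)
    fenwick.update(2, 5)
    fenwick.update(6, 9)
    assert fenwick.query(0, 8) == 9 and fenwick.query(0, 4) == 5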
| 358 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], ) -> List[str]:
UpperCamelCase : Optional[int] = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase : List[Any] = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : int = num_channels
UpperCamelCase : int = image_size
UpperCamelCase : List[Any] = min_resolution
UpperCamelCase : int = max_resolution
UpperCamelCase : Any = do_resize
UpperCamelCase : Optional[int] = size
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : Optional[Any] = image_mean
UpperCamelCase : Tuple = image_std
def snake_case_ ( self ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = ViTImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = EfficientFormerImageProcessorTester(self )
@property
def snake_case_ ( self ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) )
def snake_case_ ( self ) -> Any:
pass
def snake_case_ ( self ) -> int:
# Initialize image_processor
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : List[str] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image )
# Test not batched input
UpperCamelCase : str = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
# Test batched
UpperCamelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
def snake_case_ ( self ) -> str:
# Initialize image_processor
UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, np.ndarray )
# Test not batched input
UpperCamelCase : Dict = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
# Test batched
UpperCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
def snake_case_ ( self ) -> Tuple:
# Initialize image_processor
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : int = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, torch.Tensor )
# Test not batched input
UpperCamelCase : Optional[int] = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
# Test batched
UpperCamelCase : int = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
), )
| 103 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
_snake_case = '''▁'''
class UpperCAmelCase_ ( _lowerCAmelCase):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
lowerCamelCase__ = BarthezTokenizer
def __init__( self, __a=None, __a=None, __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", **__a, ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else mask_token
super().__init__(
__a, tokenizer_file=__a, bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, **__a, )
_lowerCAmelCase : Union[str, Any] = vocab_file
_lowerCAmelCase : Optional[Any] = False if not self.vocab_file else True
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
_lowerCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    def save_vocabulary( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 36 |
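# Hedged sketch of the BARThez-style special-token layout built above: a single
# sequence becomes `<s> A </s>` and a pair becomes `<s> A </s></s> B </s>`. The
# token ids below are assumed values for illustration only, not real vocab ids.
def _layout_with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert _layout_with_special_tokens([5, 6]) == [0, 5, 6, 2]
assert _layout_with_special_tokens([5], [7]) == [0, 5, 2, 2, 7, 2]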
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester ( unittest.TestCase ):
def __init__( self : List[str] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : int=7 , lowercase : List[str]=True , lowercase : int=True , lowercase : str=True , lowercase : Any=True , lowercase : List[str]=99 , lowercase : Union[str, Any]=32 , lowercase : Optional[Any]=5 , lowercase : Dict=4 , lowercase : Dict=37 , lowercase : Dict="gelu" , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : List[Any]=512 , lowercase : str=16 , lowercase : Dict=2 , lowercase : Any=0.02 , lowercase : Any=4 , ):
"""simple docstring"""
lowercase_ :List[str] = parent
lowercase_ :Any = batch_size
lowercase_ :Dict = seq_length
lowercase_ :Union[str, Any] = is_training
lowercase_ :Optional[int] = use_attention_mask
lowercase_ :Any = use_token_type_ids
lowercase_ :Union[str, Any] = use_labels
lowercase_ :Dict = vocab_size
lowercase_ :Tuple = hidden_size
lowercase_ :Tuple = num_hidden_layers
lowercase_ :Optional[int] = num_attention_heads
lowercase_ :Optional[Any] = intermediate_size
lowercase_ :str = hidden_act
lowercase_ :Tuple = hidden_dropout_prob
lowercase_ :Optional[Any] = attention_probs_dropout_prob
lowercase_ :Tuple = max_position_embeddings
lowercase_ :Any = type_vocab_size
lowercase_ :int = type_sequence_label_size
lowercase_ :Tuple = initializer_range
lowercase_ :Optional[Any] = num_choices
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :Union[str, Any] = None
if self.use_attention_mask:
lowercase_ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ :List[str] = None
if self.use_token_type_ids:
lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ :Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :int = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ :Tuple = config_and_inputs
lowercase_ :Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :Any = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ :Union[str, Any] = config_and_inputs
lowercase_ :Dict = True
lowercase_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
__A = True
__A = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ :Optional[Any] = FlaxBertModelTester(self )
@slow
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :List[str] = FlaxBertModel.from_pretrained("bert-base-cased" )
lowercase_ :str = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
| 223 | 0 |
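# A minimal numpy sketch of the random test inputs built by model testers like the
# one above; the shapes and vocabulary size are assumed toy values, not taken from
# any real model configuration.
import numpy as np

rng = np.random.default_rng(0)
batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = rng.integers(0, vocab_size, size=(batch_size, seq_length))
attention_mask = rng.integers(0, 2, size=(batch_size, seq_length))
attention_mask[:, -1] = 1  # keep at least one attended position per row
assert input_ids.shape == attention_mask.shape == (batch_size, seq_length)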
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SchedulerType ( Enum ):
    """simple docstring"""
    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule( optimizer , last_epoch = -1 ):
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    rules_dict = {}
    rule_list = step_rules.split(''',''' )
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(''':''' )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 370 |
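# Hedged usage sketch for the warmup schedules above: a LambdaLR multiplier that
# ramps up linearly and then decays to zero. The layer sizes and step counts are
# made-up values; only standard `torch.optim` APIs are used.
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
num_warmup_steps, num_training_steps = 10, 100

def linear_warmup_decay(step):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

scheduler = LambdaLR(optimizer, linear_warmup_decay)
for _ in range(5):
    optimizer.step()
    scheduler.step()
assert optimizer.param_groups[0]["lr"] > 0  # warmup has raised the lr above zero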
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__UpperCamelCase : Union[str, Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Dict = _TestCommandArgs(dataset=A_ , all_configs=A_ , save_infos=A_ )
lowerCAmelCase__ : Optional[int] = TestCommand(*A_ )
test_command.run()
lowerCAmelCase__ : int = os.path.join(A_ , '''README.md''' )
assert os.path.exists(A_ )
lowerCAmelCase__ : List[Any] = DatasetInfosDict.from_directory(A_ )
lowerCAmelCase__ : List[str] = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = getattr(dataset_infos['''default'''] , A_ ), getattr(expected_dataset_infos['''default'''] , A_ )
if key == "num_bytes":
assert is_apercent_close(A_ , A_ )
elif key == "splits":
assert list(A_ ) == list(A_ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 74 | 0 |
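# A standalone version of the 1%-relative-difference check the test above relies
# on when comparing recomputed byte counts against the expected dataset info.
def is_percent_close(source: float, target: float, tolerance: float = 0.01) -> bool:
    return abs(source - target) / target < tolerance

assert is_percent_close(1005, 1000)
assert not is_percent_close(1020, 1000)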
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _a ( main_process_only: bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 322 |
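# Hedged sketch of the main-process-only progress bar idea above, using only the
# conventional LOCAL_RANK environment variable instead of accelerate's PartialState.
import os
from tqdm.auto import tqdm as _tqdm_auto

local_rank = int(os.environ.get("LOCAL_RANK", "0"))
for _ in _tqdm_auto(range(3), disable=local_rank != 0):
    pass  # the bar only renders on the main (rank 0) process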
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : str = "▁"
snake_case : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ = BigBirdTokenizer
UpperCamelCase__ = BigBirdTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE ( self ):
super().setUp()
__magic_name__ : Optional[Any] = self.tokenizer_class(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = "<s>"
__magic_name__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(_a ) , 1_004 )
def SCREAMING_SNAKE_CASE ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def SCREAMING_SNAKE_CASE ( self ):
if not self.test_rust_tokenizer:
return
__magic_name__ : Dict = self.get_tokenizer()
__magic_name__ : str = self.get_rust_tokenizer()
__magic_name__ : Any = "I was born in 92000, and this is falsé."
__magic_name__ : Dict = tokenizer.tokenize(_a )
__magic_name__ : Any = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__magic_name__ : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
__magic_name__ : List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__magic_name__ : str = self.get_rust_tokenizer()
__magic_name__ : Dict = tokenizer.encode(_a )
__magic_name__ : Optional[int] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = BigBirdTokenizer(_a , keep_accents=_a )
__magic_name__ : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , )
__magic_name__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__magic_name__ : int = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = "Hello World!"
__magic_name__ : Dict = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
__magic_name__ : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__magic_name__ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__magic_name__ : List[Any] = " ".join(_a )
__magic_name__ : Any = self.big_tokenizer.encode_plus(_a , return_tensors="pt" , return_token_type_ids=_a )
__magic_name__ : Union[str, Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_a )
__magic_name__ : List[str] = BigBirdConfig(attention_type="original_full" )
__magic_name__ : Optional[int] = BigBirdModel(_a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
__magic_name__ : int = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# fmt: off
__magic_name__ : Optional[Any] = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 281 | 0 |
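# A small illustration of the "▁" (SPIECE_UNDERLINE) convention exercised in the
# tokenizer test above: word-initial pieces carry the marker, so detokenization is
# just a join plus a marker-to-space substitution.
SPIECE_UNDERLINE_DEMO = "\u2581"
pieces = [SPIECE_UNDERLINE_DEMO + "I", SPIECE_UNDERLINE_DEMO + "was", SPIECE_UNDERLINE_DEMO + "b", "or", "n"]
text = "".join(pieces).replace(SPIECE_UNDERLINE_DEMO, " ").strip()
assert text == "I was born"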
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowercase_ = datasets.logging.get_logger(__name__)
lowercase_ = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowercase_ = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowercase_ = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare( self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 371 |
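# The compute step above pivots column-oriented inputs into the row-oriented
# records COMET expects; the same reshaping in isolation, with toy data:
data = {"src": ["ein", "zwei"], "mt": ["one", "two"], "ref": ["one", "two"]}
rows = [dict(zip(data, values)) for values in zip(*data.values())]
assert rows == [
    {"src": "ein", "mt": "one", "ref": "one"},
    {"src": "zwei", "mt": "two", "ref": "two"},
]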
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main ( ) -> None:
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 0 |
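# The two regression metrics printed above, computed by hand with numpy to make
# the definitions explicit; the sample values are arbitrary.
import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
mae = np.mean(np.abs(y_true - y_pred))   # mean absolute error
mse = np.mean((y_true - y_pred) ** 2)    # mean squared error
assert mae == 0.5 and mse == 0.375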
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Tuple:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , use_stable_embedding=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = OpenLlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->List[Any]:
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->Union[str, Any]:
lowerCAmelCase = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->Optional[int]:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase_ : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ : Any = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['''input_ids''']
lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = '''single_label_classification'''
lowerCAmelCase = input_dict['''input_ids''']
lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = '''multi_label_classification'''
lowerCAmelCase = input_dict['''input_ids''']
lowerCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
lowerCAmelCase = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {'''type''': scaling_type, '''factor''': 1_0.0}
lowerCAmelCase = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
lowerCAmelCase = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
| 338 | def SCREAMING_SNAKE_CASE_ ( num ) -> str:
    if isinstance(num , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(num , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 1 |
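# A quick property check for the decimal-to-binary conversion above: Python's
# built-in bin() already produces the "0b"/"-0b" forms the function targets, so
# an independent reimplementation can be verified against it directly.
def to_binary_check(num: int) -> str:
    if num == 0:
        return "0b0"
    sign, num = ("-", -num) if num < 0 else ("", num)
    bits = []
    while num > 0:
        bits.append(str(num % 2))
        num >>= 1
    return sign + "0b" + "".join(reversed(bits))

for n in (-37, -1, 0, 1, 10, 255):
    assert to_binary_check(n) == bin(n)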
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = KandinskyVaaPipeline
_UpperCAmelCase : Any = [
"image_embeds",
"negative_image_embeds",
]
_UpperCAmelCase : List[Any] = ["image_embeds", "negative_image_embeds"]
_UpperCAmelCase : List[str] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase : int = False
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def A ( self : str ):
'''simple docstring'''
return 32
@property
def A ( self : Dict ):
'''simple docstring'''
return self.time_input_dim
@property
def A ( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A ( self : int ):
'''simple docstring'''
return 100
@property
def A ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def A ( self : List[Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.dummy_unet
_snake_case = self.dummy_movq
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=UpperCamelCase__ , )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A ( self : Optional[int] , lowercase : Dict , lowercase : Dict=0 ):
'''simple docstring'''
_snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith('mps' ):
_snake_case = torch.manual_seed(UpperCamelCase__ )
else:
_snake_case = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_snake_case = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu'
_snake_case = self.get_dummy_components()
_snake_case = self.pipeline_class(**UpperCamelCase__ )
_snake_case = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_snake_case = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
_snake_case = output.images
_snake_case = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
_snake_case = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
_snake_case = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
_snake_case = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
_snake_case = 'red cat, 4k photo'
_snake_case = torch.Generator(device='cuda' ).manual_seed(0 )
_snake_case , _snake_case = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_snake_case = torch.Generator(device='cuda' ).manual_seed(0 )
_snake_case = pipeline(
image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , output_type='np' , )
_snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ ) | 363 |
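# A hedged sketch of the mean-pixel-difference assertion used above, for uint8
# images of identical shape; the tolerance of 10 mirrors the diffusers test
# helper's convention and is an assumed value here, not imported from it.
import numpy as np

def mean_pixel_difference_check(image_a, image_b, expected_max_diff=10.0):
    diff = np.abs(image_a.astype(np.float32) - image_b.astype(np.float32)).mean()
    assert diff < expected_max_diff, f"mean pixel difference {diff} exceeds {expected_max_diff}"

a = np.zeros((4, 4, 3), dtype=np.uint8)
mean_pixel_difference_check(a, a + 1)  # an off-by-one everywhere still passes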
from __future__ import annotations
_lowerCamelCase : Optional[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowerCamelCase : Optional[int] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a_ ( __lowercase : list[float] ) -> list[float]:
_snake_case = []
_snake_case = len(__lowercase )
for i in range(__lowercase ):
_snake_case = -1
for j in range(i + 1 , __lowercase ):
if arr[i] < arr[j]:
_snake_case = arr[j]
break
result.append(__lowercase )
return result
def a_ ( __lowercase : list[float] ) -> list[float]:
_snake_case = []
for i, outer in enumerate(__lowercase ):
_snake_case = -1
for inner in arr[i + 1 :]:
if outer < inner:
_snake_case = inner
break
result.append(__lowercase )
return result
def a_ ( __lowercase : list[float] ) -> list[float]:
_snake_case = len(__lowercase )
_snake_case = []
_snake_case = [-1] * arr_size
for index in reversed(range(__lowercase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_snake_case = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_lowerCamelCase : Union[str, Any] = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
) | 130 | 0 |
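# A cross-check of the next-greatest-element variants above: the O(n) stack-based
# right-to-left scan must agree with a brute-force O(n^2) definition.
def nge_stack(arr):
    result, stack = [-1] * len(arr), []
    for i in range(len(arr) - 1, -1, -1):
        while stack and stack[-1] <= arr[i]:
            stack.pop()
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

def nge_brute(arr):
    return [next((y for y in arr[i + 1 :] if y > x), -1) for i, x in enumerate(arr)]

sample = [11, 13, 21, 3, 4, 3]
assert nge_stack(sample) == nge_brute(sample) == [13, 21, -1, 4, -1, -1]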
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : int = old_name
if "patch_embed" in old_name:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = old_name.split('''.''' )
if layer == "0":
__SCREAMING_SNAKE_CASE : Optional[Any] = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__SCREAMING_SNAKE_CASE : Tuple = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__SCREAMING_SNAKE_CASE : List[str] = old_name.replace('''3''' , '''convolution2''' )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = R'''\b\d{2}\b'''
if bool(re.search(lowercase__ , lowercase__ ) ):
__SCREAMING_SNAKE_CASE : Dict = re.search(R'''\d\.\d\d.''' , lowercase__ ).group()
else:
__SCREAMING_SNAKE_CASE : Optional[int] = re.search(R'''\d\.\d.''' , lowercase__ ).group()
if int(match[0] ) < 6:
__SCREAMING_SNAKE_CASE : List[Any] = old_name.replace(lowercase__ , '''''' )
__SCREAMING_SNAKE_CASE : int = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''intermediate_stages.''' + trimmed_name
else:
__SCREAMING_SNAKE_CASE : Any = old_name.replace(lowercase__ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__SCREAMING_SNAKE_CASE : Dict = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = str(int(match[2] ) - num_meta4D_last_stage )
__SCREAMING_SNAKE_CASE : Optional[Any] = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__SCREAMING_SNAKE_CASE : Dict = trimmed_name.replace('''fc2''' , '''linear_out''' )
__SCREAMING_SNAKE_CASE : List[str] = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__SCREAMING_SNAKE_CASE : str = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__SCREAMING_SNAKE_CASE : Union[str, Any] = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__SCREAMING_SNAKE_CASE : Union[str, Any] = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__SCREAMING_SNAKE_CASE : List[Any] = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__SCREAMING_SNAKE_CASE : Dict = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__SCREAMING_SNAKE_CASE : Tuple = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__SCREAMING_SNAKE_CASE : int = new_name.replace('''norm''' , '''layernorm''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''efficientformer.''' + new_name
else:
__SCREAMING_SNAKE_CASE : Dict = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase ( lowercase__ , lowercase__ ):
for key in checkpoint.copy().keys():
__SCREAMING_SNAKE_CASE : Tuple = checkpoint.pop(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = val
return checkpoint
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
__SCREAMING_SNAKE_CASE : Optional[Any] = EfficientFormerConfig.from_json_file(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = EfficientFormerForImageClassificationWithTeacher(lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__SCREAMING_SNAKE_CASE : List[str] = config.depths[-1] - config.num_metaad_blocks + 1
__SCREAMING_SNAKE_CASE : int = convert_torch_checkpoint(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
__SCREAMING_SNAKE_CASE : Any = 256
__SCREAMING_SNAKE_CASE : int = 224
__SCREAMING_SNAKE_CASE : List[str] = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__SCREAMING_SNAKE_CASE : int = processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__SCREAMING_SNAKE_CASE : int = Compose(
[
Resize(lowercase__ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(lowercase__ ),
ToTensor(),
Normalize(lowercase__ , lowercase__ ),
] )
__SCREAMING_SNAKE_CASE : List[str] = image_transforms(lowercase__ ).unsqueeze(0 )
assert torch.allclose(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = outputs.logits
__SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 1000)
if "l1" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , lowercase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__SCREAMING_SNAKE_CASE : List[Any] = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , lowercase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__SCREAMING_SNAKE_CASE : str = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(lowercase__ )
print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=lowercase__ , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=lowercase__ , )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
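# Example invocation (hypothetical file names; only the flags come from the parser above):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path EfficientFormer_l1_300 \
#       --no-push_to_hub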
| 9 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int=13 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[Any]=99 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :int=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Optional[Any]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Tuple=0.02 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :int=None , ) -> int:
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Any = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : int = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , *lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , *lowerCAmelCase__ :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :int=False ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__SCREAMING_SNAKE_CASE : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Tuple = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : Dict = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def __magic_name__( self :Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Any ) -> List[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Dict = OpenAIGPTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=torch_device ) # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
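    # A minimal sketch of the greedy-decoding pattern the integration test above
    # exercises (public openai-gpt checkpoint; do_sample=False selects greedy search):
    #   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
    #   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    #   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    #   input_ids = tokenizer("the president is", return_tensors="pt").input_ids
    #   print(tokenizer.decode(model.generate(input_ids, do_sample=False)[0]))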
| 9 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=10_24 ):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(text ):
        return tok(text , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # can't fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else: # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
return finished_src, finished_tgt
def pack_data_dir( tok , data_dir: Path , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path , save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / F"""{split}.target""" )
def packer_cli( ) -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' , type=int , default=1_28 )
    parser.add_argument('''--data_dir''' , type=str )
    parser.add_argument('''--save_path''' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
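# Toy check of pack_examples above, using a stub "tokenizer" that counts whitespace
# tokens (the stub is illustrative, not part of the script):
#   import torch
#   from types import SimpleNamespace
#   class CountTok:
#       def __call__(self, text, return_tensors=None):
#           return SimpleNamespace(input_ids=torch.zeros(1, len(text.split())))
#   pack_examples(CountTok(), ["a b", "c d e", "f"], ["A B", "C D E", "F"], max_tokens=4)
#   # -> (["a b", "c d e f"], ["A B", "C D E F"])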
| 250 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i: int ) -> int: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _a ( ) -> Dict:
'''simple docstring'''
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
__A = [1, 2, 3]
with pytest.raises(lowerCamelCase ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowerCamelCase , lowerCamelCase , num_proc=2 )
with pytest.raises(lowerCamelCase ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowerCamelCase , lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _a ( lowerCamelCase: Optional[int] ) -> List[str]:
'''simple docstring'''
__A = [1, 2]
__A = {'''a''': 1, '''b''': 2}
__A = {'''a''': [1, 2], '''b''': [3, 4]}
__A = {'''a''': {'''1''': 1}, '''b''': 2}
__A = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__A = [2, 3]
__A = {'''a''': 2, '''b''': 3}
__A = {'''a''': [2, 3], '''b''': [4, 5]}
__A = {'''a''': {'''1''': 2}, '''b''': 3}
__A = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
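# Hedged illustration of the map_nested contract the assertions above rely on: the
# function is applied to every leaf while the container structure is preserved, e.g.
#   map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}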
| 250 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( A_ , unittest.TestCase ):
UpperCamelCase_ : List[Any] =DebertaTokenizer
UpperCamelCase_ : List[Any] =True
UpperCamelCase_ : Optional[Any] =DebertaTokenizerFast
def _A (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase= [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
__lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
__lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__lowercase= {'unk_token': '[UNK]'}
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
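    # Note on the "\u0120" (Ġ) entries in the toy vocab above: byte-level BPE encodes
    # a leading space as this marker, which is why " lower" tokenizes to the merged
    # "\u0120low" + "er" rather than to a bare "lower".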
def _A (self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= 'lower newer'
__lowercase= 'lower newer'
return input_text, output_text
def _A (self ):
__lowercase= self.get_tokenizer()
__lowercase= 'lower newer'
__lowercase= ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__lowercase= tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowercase= tokens + [tokenizer.unk_token]
__lowercase= [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
def _A (self ):
__lowercase= self.get_tokenizer()
__lowercase= tokenizer('Hello' , 'World' )
__lowercase= [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , lowerCAmelCase )
@slow
def _A (self ):
__lowercase= self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
__lowercase= tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase )
__lowercase= tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase )
__lowercase= tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowercase= tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
__lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _A (self ):
__lowercase= [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowercase= tokenizer_class.from_pretrained('microsoft/deberta-base' )
__lowercase= [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
__lowercase= tokenizer(lowerCAmelCase , padding=lowerCAmelCase )
__lowercase= [tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) for seq in encoding['input_ids']]
# fmt: off
__lowercase= {
'input_ids': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowercase= [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , lowerCAmelCase )
for expected, decoded in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 295 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowercase__ )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCamelCase_ : Optional[List[str]] =list_field(
default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
int(lowercase__ )
return True
except ValueError:
return False
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
float(lowercase__ )
return True
except ValueError:
return False
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= args
__lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__lowercase= csv.DictReader(lowerCAmelCase )
for row in reader:
__lowercase= row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__lowercase= int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__lowercase= float(row['result'] )
def _A (self ):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase= np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , )
else:
__lowercase= np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
__lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )]
plt.scatter(
lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(lowerCAmelCase , lowerCAmelCase , '--' )
title_str += f' {label_model_name} vs.'
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _lowerCamelCase( ) -> str:
'''simple docstring'''
    parser = HfArgumentParser(lowercase__ )
    args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=args )
plot.plot()
if __name__ == "__main__":
main()
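# The csv file consumed above is expected to provide the columns
# model, batch_size, sequence_length, result — for example (illustrative numbers):
#   model,batch_size,sequence_length,result
#   gpt2,8,128,1534
#   gpt2,8,512,2786
#   bert-base-uncased,8,128,1377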
| 295 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A_ :
    def __init__( self , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def UpperCAmelCase_ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
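# Usage sketch for the Boruvka MST class above (a toy triangle graph; the expected
# output follows the print statements in boruvka):
#   g = A_(3)
#   g.add_edge(0, 1, 5); g.add_edge(1, 2, 10); g.add_edge(2, 0, 1)
#   g.boruvka()  # adds edges 2-0 (weight 1) and 0-1 (weight 5); total weight 6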
| 351 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE :Optional[int] = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width( height: int , width: int , scale_factor: int=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
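# Worked example of downscale_height_and_width (scale_factor defaults to 8):
#   downscale_height_and_width(768, 768) -> (96, 96)    # 768 // 64 = 12, 12 * 8 = 96
#   downscale_height_and_width(769, 769) -> (104, 104)  # a remainder bumps 12 -> 13
# so prepare_latents below receives the requested size divided by the scale factor,
# rounded up whenever the size is not a multiple of scale_factor**2.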
class A_ ( lowerCAmelCase_ ):
def __init__( self : Dict , snake_case_ : UNetaDConditionModel , snake_case_ : DDPMScheduler , snake_case_ : VQModel , ):
super().__init__()
self.register_modules(
unet=snake_case_ , scheduler=snake_case_ , movq=snake_case_ , )
_UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowercase ( self : str , snake_case_ : Tuple , snake_case_ : int , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Union[str, Any] ):
if latents is None:
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_UpperCAmelCase = latents.to(snake_case_ )
_UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def lowercase ( self : Tuple , snake_case_ : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
def lowercase ( self : List[str] , snake_case_ : Optional[Any]=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_UpperCAmelCase = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=snake_case_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase = cpu_offload_with_hook(snake_case_ , snake_case_ , prev_module_hook=snake_case_ )
# We'll offload the last model manually.
_UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase ( self : Any ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case_ )
def __call__( self : str , snake_case_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case_ : int = 5_1_2 , snake_case_ : int = 5_1_2 , snake_case_ : int = 1_0_0 , snake_case_ : float = 4.0 , snake_case_ : int = 1 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[torch.FloatTensor] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
_UpperCAmelCase = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(snake_case_ , snake_case_ ):
_UpperCAmelCase = torch.cat(snake_case_ , dim=0 )
_UpperCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(snake_case_ , snake_case_ ):
_UpperCAmelCase = torch.cat(snake_case_ , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase = image_embeds.repeat_interleave(snake_case_ , dim=0 )
_UpperCAmelCase = negative_image_embeds.repeat_interleave(snake_case_ , dim=0 )
_UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case_ )
self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
_UpperCAmelCase = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
# post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            if output_type == "pil":
                image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 156 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
lowerCAmelCase__ = logging.getLogger(__name__)
def git_log(folder_path: str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"] )
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"] )
        params.global_rank = int(os.environ["RANK"] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def set_seed(args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
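# Hedged launch sketch: the multi-GPU branch above reads torch.distributed-style
# environment variables; a hypothetical 2-node x 8-GPU run would start rank 0 as
#   N_NODES=2 NODE_RANK=0 N_GPU_NODE=8 WORLD_SIZE=16 RANK=0 \
#       python train.py --n_gpu 8 --local_rank 0 ...
# (train.py and the CLI flags are illustrative; only the env var names come from the code).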
| 11 |
from __future__ import annotations
def fractional_knapsack(value: list[int] , weight: list[int] , capacity: int ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
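# Usage sketch (classic toy instance; the greedy optimum takes all of items 0 and 1
# plus two thirds of item 2):
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   # -> (240.0, [1, 1, 0.666...])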
| 11 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :int = LongformerTokenizer
UpperCAmelCase_ :Union[str, Any] = True
UpperCAmelCase_ :Optional[int] = LongformerTokenizerFast
UpperCAmelCase_ :Tuple = True
def __lowerCAmelCase ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ :int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase_ :Union[str, Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase_ :Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase_ :Optional[Any] = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__A ) )
def __lowerCAmelCase ( self , **__A ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , **__A ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :List[str] = """lower newer"""
lowerCAmelCase_ :List[str] = """lower newer"""
return input_text, output_text
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ :Dict = """lower newer"""
lowerCAmelCase_ :int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) # , add_prefix_space=True)
self.assertListEqual(__A , __A )
lowerCAmelCase_ :int = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__A ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__A ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :List[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__A , add_prefix_space=__A )
lowerCAmelCase_ :Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__A , add_prefix_space=__A )
lowerCAmelCase_ :List[Any] = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Any = """Encode this sequence."""
lowerCAmelCase_ :Any = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowerCAmelCase_ :List[Any] = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
lowerCAmelCase_ :Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A , __A )
lowerCAmelCase_ :Dict = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
lowerCAmelCase_ :str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A , __A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowerCAmelCase_ :List[Any] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A , __A )
# Testing spaces after special tokens
lowerCAmelCase_ :Dict = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space
lowerCAmelCase_ :Any = tokenizer.convert_tokens_to_ids(__A )
lowerCAmelCase_ :Tuple = """Encode <mask> sequence"""
lowerCAmelCase_ :Optional[Any] = """Encode <mask>sequence"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = encoded.index(__A )
lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A , __A )
lowerCAmelCase_ :Dict = tokenizer.encode(__A )
lowerCAmelCase_ :List[str] = encoded.index(__A )
lowerCAmelCase_ :Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def __lowerCAmelCase ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Union[str, Any] = """A, <mask> AllenNLP sentence."""
lowerCAmelCase_ :str = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
lowerCAmelCase_ :Optional[int] = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowerCAmelCase_ :Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowerCAmelCase_ :Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __lowerCAmelCase ( self ) -> Tuple:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase_ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase_ :Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __A )
self.assertEqual(post_processor_state["""trim_offsets"""] , __A )
def __lowerCAmelCase ( self ) -> int:
        # Test that the returned offsets correctly reflect the `add_prefix_space`
        # and `trim_offsets` arguments
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ :List[str] = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :Any = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
lowerCAmelCase_ :Dict = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
lowerCAmelCase_ :Dict = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :List[str] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
lowerCAmelCase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :List[Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
lowerCAmelCase_ :Dict = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase_ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , )
lowerCAmelCase_ :Any = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :str = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
lowerCAmelCase_ :List[Any] = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
lowerCAmelCase_ :Tuple = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
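    # Hedged summary of the offset behaviour exercised above: with trim_offsets=True
    # the fast tokenizer excludes the leading space from a token's span, while with
    # trim_offsets=False the span starts one character earlier and includes it — e.g.
    # for "hello hello" the second token maps to (6, 11) when trimmed, (5, 11) when not.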
| 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        lowerCAmelCase_ :int = tokenizer.encode(input_text , prefix_text=prefix_text )
        lowerCAmelCase_ :int = tokenizer.decode(x_token )
        lowerCAmelCase_ :Dict = tokenizer.decode(x_token_a )
        lowerCAmelCase_ :Tuple = tokenizer.decode(x_token_b )
        self.assertEqual(x_decoded , output_text )
        self.assertEqual(x_decoded_a , output_text )
        self.assertEqual(x_decoded_b , output_text )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        lowerCAmelCase_ :List[Any] = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(x_token , expected_mask )
        self.assertListEqual(x_token_a , expected_mask_a )
        self.assertListEqual(x_token_b , expected_mask_b )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(x_token ) , tokenizer.decode(x_token_a ) )
        self.assertEqual(tokenizer.decode(x_token ) , tokenizer.decode(x_token_b ) )
        self.assertNotEqual(x_token , x_token_a )
        self.assertNotEqual(x_token , x_token_b )
        self.assertEqual(x_token_a[1] , x_token_b[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_b[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        lowerCAmelCase_ :Dict = tokenizer(texts , padding=True )
        lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(texts , padding=True )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
        self.assertListEqual(x_token.input_ids , expected_input_ids )
        self.assertListEqual(x_token.token_type_ids , expected_token_type_ids )
        self.assertListEqual(x_token.attention_mask , expected_attention_mask )
        self.assertListEqual(x_token_a.input_ids , expected_input_ids )
        self.assertListEqual(x_token_a.token_type_ids , expected_token_type_ids )
        self.assertListEqual(x_token_a.attention_mask , expected_attention_mask )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
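# A standalone illustration (not part of the tokenizer itself; all names below
# are made up) of the three token_type_ids layouts asserted in the prefix
# tests above: a leading SOT position, `len_prefix` prefix tokens, a SEG
# position, then the text portion.
len_prefix, len_text = 3, 4
mask_no_prefix = [1] + [0] * (len_prefix + len_text + 1)
mask_all_prefix = [1] * (len_prefix + len_text + 1) + [0]
mask_split = [1] + [1] * len_prefix + [0] * (len_text + 1)
assert len(mask_no_prefix) == len(mask_all_prefix) == len(mask_split) == 9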
| 1 | 1 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class lowercase_ :
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : bool = True ,lowercase__ : bool = False ):
__lowercase = scheduler
__lowercase = optimizers if isinstance(lowercase__ ,(list, tuple) ) else [optimizers]
__lowercase = split_batches
__lowercase = step_with_optimizer
__lowercase = GradientState()
def SCREAMING_SNAKE_CASE ( self : Any ,*lowercase__ : str ,**lowercase__ : str ):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowercase__ ,**lowercase__ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowercase__ ,**lowercase__ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowercase = AcceleratorState().num_processes
for _ in range(lowercase__ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler ,'''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowercase__ ,**lowercase__ )
else:
self.scheduler.step(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.scheduler.get_last_lr()
def SCREAMING_SNAKE_CASE ( self : int ):
return self.scheduler.state_dict()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Tuple ):
self.scheduler.load_state_dict(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
return self.scheduler.get_lr()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : List[Any] ,**lowercase__ : List[str] ):
return self.scheduler.print_lr(*lowercase__ ,**lowercase__ )
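# A minimal standalone sketch (plain PyTorch only; all names below are
# illustrative, not part of the wrapper above) of the rule that each training
# step advances the scheduler once per process when batches are not split.
import torch

_params = [torch.nn.Parameter(torch.zeros(1))]
_opt = torch.optim.SGD(_params, lr=0.1)
_sched = torch.optim.lr_scheduler.StepLR(_opt, step_size=2, gamma=0.5)
_num_processes = 2  # stand-in for AcceleratorState().num_processes
for _ in range(3):  # three training steps
    _opt.step()
    for _ in range(_num_processes):  # mirror the per-process stepping above
        _sched.step()
print(_sched.get_last_lr())  # 6 scheduler steps: 0.1 * 0.5**3 == 0.0125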
| 104 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ ):
"""simple docstring"""
__lowercase = len(A__ )
# We need to create solution object to save path.
__lowercase = [[0 for _ in range(A__ )] for _ in range(A__ )]
__lowercase = run_maze(A__ , 0 , 0 , A__ )
if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = len(A__ )
# Final check point.
if i == j == (size - 1):
__lowercase = 1
return True
__lowercase = (not i < 0) and (not j < 0) # Check lower bounds
__lowercase = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__lowercase = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__lowercase = 1
# check for directions
if (
run_maze(A__ , i + 1 , A__ , A__ )
or run_maze(A__ , A__ , j + 1 , A__ )
or run_maze(A__ , i - 1 , A__ , A__ )
or run_maze(A__ , A__ , j - 1 , A__ )
):
return True
__lowercase = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
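# A standalone restatement of the backtracking idea above (the two collapsed
# `_A` names were presumably `solution` and `run_maze`); 0 = open, 1 = blocked.
def tiny_solve(maze, i=0, j=0, path=None):
    n = len(maze)
    if path is None:
        path = [[0] * n for _ in range(n)]
    if i == j == n - 1:  # reached the bottom-right goal cell
        path[i][j] = 1
        return path
    if 0 <= i < n and 0 <= j < n and not maze[i][j] and not path[i][j]:
        path[i][j] = 1  # mark visited
        for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            if tiny_solve(maze, i + di, j + dj, path):
                return path
        path[i][j] = 0  # backtrack
    return None

assert tiny_solve([[0, 0], [1, 0]]) == [[1, 1], [0, 1]]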
| 104 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ""
lowercase_ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] , _lowerCAmelCase : Optional[DatasetInfo] = None , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : int , ):
super().__init__(self , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = repo_info
SCREAMING_SNAKE_CASE_ = token
SCREAMING_SNAKE_CASE_ = None
def lowerCAmelCase_ ( self : Tuple ):
if self.dir_cache is None:
SCREAMING_SNAKE_CASE_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE_ = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowerCAmelCase ): {'name': str(_lowerCAmelCase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : str = "rb" , **_lowerCAmelCase : Optional[Any] , ):
        if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
SCREAMING_SNAKE_CASE_ = hf_hub_url(self.repo_info.id , _lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCAmelCase , mode=_lowerCAmelCase , headers=get_authentication_headers_for_url(_lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Any , **_lowerCAmelCase : Dict ):
self._get_dirs()
SCREAMING_SNAKE_CASE_ = self._strip_protocol(_lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False , **_lowerCAmelCase : str ):
self._get_dirs()
SCREAMING_SNAKE_CASE_ = PurePosixPath(path.strip('/' ) )
SCREAMING_SNAKE_CASE_ = {}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE_ = PurePosixPath(p.strip('/' ) )
SCREAMING_SNAKE_CASE_ = p.parent
if root == path:
SCREAMING_SNAKE_CASE_ = f
SCREAMING_SNAKE_CASE_ = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
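# A standalone sketch of the parent-directory expansion done in `_get_dirs`
# above (the file names here are made up for illustration):
from pathlib import PurePosixPath as _PurePosixPath

_cache = {}
for _name in ["data/train/part-0.parquet", "README.md"]:
    _cache[_name] = {"name": _name, "size": None, "type": "file"}
    # parents yields ancestors up to "."; [:-1] drops the "." entry
    for _d in list(_PurePosixPath(_name).parents)[:-1]:
        _cache[str(_d)] = {"name": str(_d), "size": None, "type": "directory"}
assert sorted(_cache) == ["README.md", "data", "data/train", "data/train/part-0.parquet"]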
| 356 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ : Dict = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _lowerCAmelCase , )
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
 | 210 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = IFInpaintingPipeline
lowerCAmelCase_ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowerCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase_ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_dummy_components()
def lowerCAmelCase__ ( self , a__ , a__=0 ) -> Dict:
'''simple docstring'''
if str(a__ ).startswith("mps" ):
snake_case_ = torch.manual_seed(a__ )
else:
snake_case_ = torch.Generator(device=a__ ).manual_seed(a__ )
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
snake_case_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self._test_save_load_local()
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
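# A standalone sketch of the device-aware seeding branch used by
# `get_dummy_inputs` above (MPS rejects torch.Generator(device=...), hence
# the fallback to the global generator; names below are illustrative):
import torch

def make_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

_gen = make_generator("cpu", seed=42)
print(torch.rand(2, generator=_gen))  # reproducible for a fixed seed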
| 85 |
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
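# A standalone closed-form sketch of the cyclic key extension performed by
# the first `_snake_case` above (originally `generate_key`), for the
# space-free case:
def extend_key(message: str, key: str) -> str:
    return (key * (len(message) // len(key) + 1))[: len(message)]

assert extend_key("THEGERMANATTACK", "SECRET") == "SECRETSECRETSEC"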
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
UpperCAmelCase_ : Dict = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
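# A standalone sketch of the optional-dependency guard pattern above, using
# only the stdlib rather than the transformers helpers:
import importlib.util

def optional_import(name: str):
    if importlib.util.find_spec(name) is None:
        return None  # dependency missing; callers degrade gracefully
    return importlib.import_module(name)

_np = optional_import("numpy")
print("numpy available" if _np is not None else "numpy missing")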
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCAmelCase_ : Dict = False
@skip_mps
class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
lowerCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __A ( cls ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def __A ( cls ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def __A ( self ):
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
A__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
A__ = CLIPTextModel(UpperCAmelCase__ )
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=0 ):
if str(UpperCAmelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCAmelCase__ )
else:
A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A__ = A__ = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def __A ( self ):
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
A__ = pipe(**UpperCAmelCase__ ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
A__ = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1e-3 )
def __A ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __A ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __A ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __A ( self ):
super().test_save_load_local(expected_max_difference=5e-4 )
def __A ( self ):
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
@classmethod
def __A ( cls ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def __A ( cls ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ = torch.manual_seed(51 )
A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to("cuda" )
A__ = "a painting of an elephant with glasses"
A__ = [5, 7]
A__ = pipe(
prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5e-1
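# A standalone sketch of the corner-slice comparison used throughout the
# tests above (random data here, just to show the shapes involved):
import numpy as np

_image = np.random.rand(1, 64, 64, 3)
_image_slice = _image[0, -3:, -3:, -1]  # bottom-right 3x3 of the last channel
_expected_slice = _image_slice.copy()
assert np.abs(_image_slice.flatten() - _expected_slice.flatten()).max() <= 1e-3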
| 198 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __get__( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=None ) -> str:
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
__magic_name__ = """__cached_""" + self.fget.__name__
__magic_name__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if cached is None:
__magic_name__ = self.fget(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return cached
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( A_ ):
'''simple docstring'''
if is_torch_fx_proxy(A_ ):
return True
if is_torch_available():
import torch
if isinstance(A_, torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A_, tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A_, (jnp.ndarray, Tracer) ):
return True
return isinstance(A_, np.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return isinstance(A_, np.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return _is_numpy(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.Tensor )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.device )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
    if isinstance(A_, str ):
        if hasattr(torch , A_ ):
            __magic_name__ = getattr(torch , A_ )
else:
return False
return isinstance(A_, torch.dtype )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(A_ )
def a__ ( A_ ):
'''simple docstring'''
import tensorflow as tf
return isinstance(A_, tf.Tensor )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(A_ )
def a__ ( A_ ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A_, """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(A_ )
return type(A_ ) == tf.Tensor
def a__ ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(A_ )
def a__ ( A_ ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(A_, jnp.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(A_ )
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (dict, UserDict) ):
return {k: to_py_obj(A_ ) for k, v in obj.items()}
elif isinstance(A_, (list, tuple) ):
return [to_py_obj(A_ ) for o in obj]
elif is_tf_tensor(A_ ):
return obj.numpy().tolist()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A_ ):
return np.asarray(A_ ).tolist()
elif isinstance(A_, (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (dict, UserDict) ):
return {k: to_numpy(A_ ) for k, v in obj.items()}
elif isinstance(A_, (list, tuple) ):
return np.array(A_ )
elif is_tf_tensor(A_ ):
return obj.numpy()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A_ ):
return np.asarray(A_ )
else:
return obj
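# A quick standalone check of the nested conversion idea behind `to_py_obj`
# above, exercised with numpy only (np is imported at the top of this module;
# no torch/tf/jax required):
def _to_py(obj):
    if isinstance(obj, dict):
        return {k: _to_py(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_to_py(o) for o in obj]
    if isinstance(obj, (np.ndarray, np.number)):
        return obj.tolist()
    return obj

assert _to_py({"a": np.arange(3), "b": [np.float32(1.5)]}) == {"a": [0, 1, 2], "b": [1.5]}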
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = fields(self )
# Safety and consistency checks
if not len(UpperCamelCase__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
__magic_name__ = getattr(self , class_fields[0].name )
__magic_name__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCamelCase__ ):
            if isinstance(first_field , dict ):
__magic_name__ = first_field.items()
__magic_name__ = True
else:
try:
__magic_name__ = iter(UpperCamelCase__ )
__magic_name__ = True
except TypeError:
__magic_name__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCamelCase__ ):
if (
not isinstance(UpperCamelCase__ , (list, tuple) )
or not len(UpperCamelCase__ ) == 2
or not isinstance(element[0] , UpperCamelCase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__magic_name__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__magic_name__ = element[1]
elif first_field is not None:
__magic_name__ = first_field
else:
for field in class_fields:
__magic_name__ = getattr(self , field.name )
if v is not None:
__magic_name__ = v
def __delitem__( self : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> Optional[Any]:
"""simple docstring"""
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : Optional[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Any , UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
        if isinstance(UpperCamelCase__ , str ):
__magic_name__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def __setitem__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : str ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """longest"""
a__ = """max_length"""
a__ = """do_not_pad"""
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """pt"""
a__ = """tf"""
a__ = """np"""
a__ = """jax"""
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : List[ContextManager] ) -> Any:
"""simple docstring"""
__magic_name__ = context_managers
__magic_name__ = ExitStack()
def __enter__( self : Optional[int] ) -> int:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(UpperCamelCase__ )
def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
self.stack.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = infer_framework(A_ )
if framework == "tf":
__magic_name__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = model_class.__name__
__magic_name__ = infer_framework(A_ )
if framework == "tf":
__magic_name__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( A_, A_ = "", A_ = "." ):
'''simple docstring'''
def _flatten_dict(A_, A_="", A_="." ):
for k, v in d.items():
            __magic_name__ = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
else:
yield key, v
return dict(_flatten_dict(A_, A_, A_ ) )
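# A quick standalone check of the delimiter-joined flattening above:
def _flatten(d, parent_key="", delimiter="."):
    out = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if isinstance(v, dict) and v:
            out.update(_flatten(v, key, delimiter))
        else:
            out[key] = v
    return out

assert _flatten({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}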
@contextmanager
def a__ ( A_, A_ = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( A_, A_=None ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.transpose(A_, axes=A_ )
elif is_torch_tensor(A_ ):
return array.T if axes is None else array.permute(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.transpose(A_, perm=A_ )
elif is_jax_tensor(A_ ):
return jnp.transpose(A_, axes=A_ )
else:
raise ValueError(f'''Type not supported for transpose: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.reshape(A_, A_ )
elif is_torch_tensor(A_ ):
return array.reshape(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.reshape(A_, A_ )
elif is_jax_tensor(A_ ):
return jnp.reshape(A_, A_ )
else:
raise ValueError(f'''Type not supported for reshape: {type(A_ )}.''' )
def a__ ( A_, A_=None ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.squeeze(A_, axis=A_ )
elif is_torch_tensor(A_ ):
return array.squeeze() if axis is None else array.squeeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.squeeze(A_, axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.squeeze(A_, axis=A_ )
else:
raise ValueError(f'''Type not supported for squeeze: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.expand_dims(A_, A_ )
elif is_torch_tensor(A_ ):
return array.unsqueeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.expand_dims(A_, axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.expand_dims(A_, axis=A_ )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A_ )}.''' )
def a__ ( A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.size(A_ )
elif is_torch_tensor(A_ ):
return array.numel()
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.size(A_ )
elif is_jax_tensor(A_ ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(A_, (tuple, list) ):
__magic_name__ = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
__magic_name__ = f'''{repo_id}--{value}'''
return auto_map
def a__ ( A_ ):
'''simple docstring'''
for base_class in inspect.getmro(A_ ):
__magic_name__ = base_class.__module__
__magic_name__ = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : Dict = 9
lowercase_ : Union[str, Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    lowercase_ : Tuple = kruskal(num_nodes , edges )
lowercase_ : Optional[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected )
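    # Quick arithmetic check on the expected MST above: a spanning tree of the
    # 9-vertex graph must use 9 - 1 = 8 edges, and the listed weights
    # (1, 2, 2, 4, 4, 7, 8, 9) sum to 37.
    assert sum((1, 2, 2, 4, 4, 7, 8, 9)) == 37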
| 356 |
'''simple docstring'''
import os
import numpy
import onnx
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Tuple = a.name
lowercase_ : Tuple = b.name
lowercase_ : Any = """"""
lowercase_ : List[Any] = """"""
lowercase_ : List[Any] = a == b
lowercase_ : Union[str, Any] = name_a
lowercase_ : Optional[Any] = name_b
return res
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : int = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : Optional[Any] = inits[i].name
lowercase_ : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]:
lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ )
lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase_ : List[Any] = list(model.graph.initializer )
lowercase_ : int = set()
lowercase_ : int = {}
lowercase_ : str = []
lowercase_ : int = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
lowercase_ : Dict = inits[j].data_type
lowercase_ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase__ )
total_reduced_size += mem_size
lowercase_ : int = inits[i].name
lowercase_ : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowercase_ : Tuple = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = """optimized_""" + model_file_name
lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
return new_model
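# A standalone sketch of the name-blanking equality trick used by the first
# `lowerCamelCase` above (originally `_is_equal_tensor_proto`): blank both
# names, compare, then restore them. The class below is a made-up stand-in
# for onnx.TensorProto, for illustration only.
from dataclasses import dataclass

@dataclass
class _FakeTensorProto:
    name: str
    dims: tuple
    raw_data: bytes

def _equal_ignoring_name(a, b):
    name_a, name_b = a.name, b.name
    a.name = b.name = ""
    res = a == b  # dataclass __eq__ now ignores the original names
    a.name, b.name = name_a, name_b
    return res

_t1 = _FakeTensorProto("w1", (2, 2), b"\x00" * 16)
_t2 = _FakeTensorProto("w2", (2, 2), b"\x00" * 16)
assert _equal_ignoring_name(_t1, _t2)  # same payload, different names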
| 21 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_a = logging.get_logger(__name__)
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ["pixel_values"]
def __init__( self : Optional[int], UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Dict[str, int]] = None, UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, **UpperCAmelCase__ : Any, ):
super().__init__(**UpperCAmelCase__ )
__lowercase = size if size is not None else {"shortest_edge": 2_5_6}
__lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ )
__lowercase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowercase = get_size_dict(UpperCAmelCase__, param_name="crop_size" )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Dict[str, int], UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Union[str, Any], ):
__lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowercase = get_resize_output_image_size(UpperCAmelCase__, size=size["shortest_edge"], default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__, size=UpperCAmelCase__, resample=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Dict[str, int], UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Optional[int], ):
__lowercase = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__, size=(size["height"], size["width"]), data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : str, UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : float, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Union[str, Any] ):
return rescale(UpperCAmelCase__, scale=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Union[float, List[float]], UpperCAmelCase__ : Union[float, List[float]], UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Tuple, ):
return normalize(UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Dict, UpperCAmelCase__ : ImageInput, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : PILImageResampling = None, UpperCAmelCase__ : bool = None, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[float] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[str, TensorType]] = None, UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST, **UpperCAmelCase__ : Optional[Any], ):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ )
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(UpperCAmelCase__, param_name="crop_size" )
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=UpperCAmelCase__, size=UpperCAmelCase__, resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=UpperCAmelCase__, size=UpperCAmelCase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=UpperCAmelCase__, scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__ ) for image in images]
__lowercase = [to_channel_dimension_format(UpperCAmelCase__, UpperCAmelCase__ ) for image in images]
__lowercase = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__, tensor_type=UpperCAmelCase__ )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Tuple] = None ):
__lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase__ ):
__lowercase = target_sizes.numpy()
__lowercase = []
for idx in range(len(UpperCAmelCase__ ) ):
__lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=UpperCAmelCase__ )
__lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase__ )
else:
__lowercase = logits.argmax(dim=1 )
__lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
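# A standalone pure-Python sketch of the "shortest_edge" resize rule the
# processor above delegates to `get_resize_output_image_size`: scale so the
# shorter side hits the target while preserving aspect ratio (the rounding
# here is an assumption of this sketch):
def _shortest_edge_size(height, width, shortest_edge):
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(shortest_edge * long / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)

assert _shortest_edge_size(480, 640, 256) == (256, 341)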
| 17 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowerCAmelCase : int = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = """albert"""
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any]=3_00_00 , __lowerCamelCase : Union[str, Any]=1_28 , __lowerCamelCase : Any=40_96 , __lowerCamelCase : str=12 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Dict=1_63_84 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]="gelu_new" , __lowerCamelCase : List[str]=0 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=1e-12 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Any="absolute" , __lowerCamelCase : List[str]=0 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=3 , **__lowerCamelCase : int , ) -> int:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
a = vocab_size
a = embedding_size
a = hidden_size
a = num_hidden_layers
a = num_hidden_groups
a = num_attention_heads
a = inner_group_num
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = classifier_dropout_prob
a = position_embedding_type
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a = {0: "batch", 1: "choice", 2: "sequence"}
else:
a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 107 | 0 |
import baseaa
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> bytes:
return baseaa.aaaencode(string.encode("utf-8"))
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> str:
return baseaa.aaadecode(__UpperCamelCase).decode("utf-8")
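# A runnable restatement with the stripped digits restored: "baseaa" was
# presumably the stdlib `base64` module and the Ascii85 (`a85*`) codecs.
import base64

def ascii85_encode(text: str) -> bytes:
    return base64.a85encode(text.encode("utf-8"))

def ascii85_decode(data: bytes) -> str:
    return base64.a85decode(data).decode("utf-8")

assert ascii85_decode(ascii85_encode("Hello World!")) == "Hello World!"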
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> list[int]:
a = 2
a = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__UpperCamelCase)
if n > 1:
factors.append(__UpperCamelCase)
return factors
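# A runnable restatement of the trial-division loop above with the collapsed
# names resolved (the shadowed locals were presumably `i` and `factors`, and
# the parameter `n`):
def prime_factors(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]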
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict ,lowercase_ : Tuple ,lowercase_ : List[str]=1_3 ,lowercase_ : int=7 ,lowercase_ : Optional[Any]=True ,lowercase_ : Any=True ,lowercase_ : int=True ,lowercase_ : Any=True ,lowercase_ : str=9_9 ,lowercase_ : Union[str, Any]=3_2 ,lowercase_ : str=2 ,lowercase_ : Union[str, Any]=4 ,lowercase_ : Optional[int]=3_7 ,lowercase_ : Any="gelu" ,lowercase_ : str=0.1 ,lowercase_ : Dict=0.1 ,lowercase_ : Optional[int]=5_1_2 ,lowercase_ : Optional[Any]=1_6 ,lowercase_ : Optional[Any]=2 ,lowercase_ : int=0.02 ,lowercase_ : Union[str, Any]=3 ,lowercase_ : Optional[Any]=4 ,lowercase_ : Dict=None ,):
lowerCAmelCase__ : Tuple = parent
lowerCAmelCase__ : Dict = 1_3
lowerCAmelCase__ : List[str] = 7
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = 9_9
lowerCAmelCase__ : Any = 3_2
lowerCAmelCase__ : Optional[int] = 2
lowerCAmelCase__ : Optional[Any] = 4
lowerCAmelCase__ : Any = 3_7
lowerCAmelCase__ : Tuple = '''gelu'''
lowerCAmelCase__ : Any = 0.1
lowerCAmelCase__ : Union[str, Any] = 0.1
lowerCAmelCase__ : List[Any] = 5_1_2
lowerCAmelCase__ : str = 1_6
lowerCAmelCase__ : Union[str, Any] = 2
lowerCAmelCase__ : List[Any] = 0.02
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Union[str, Any] = 4
lowerCAmelCase__ : Optional[int] = None
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : List[Any] = None
if self.use_input_mask:
lowerCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : int = None
if self.use_labels:
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
lowerCAmelCase__ : Tuple = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=lowercase_ ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[str] ,lowercase_ : Tuple ,lowercase_ : Tuple ,lowercase_ : Optional[int] ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Dict ):
lowerCAmelCase__ : Optional[int] = TFRoFormerModel(config=lowercase_ )
lowerCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ : str = [input_ids, input_mask]
lowerCAmelCase__ : str = model(lowercase_ )
lowerCAmelCase__ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Union[str, Any] ,lowercase_ : Any ,lowercase_ : Union[str, Any] ,lowercase_ : int ,lowercase_ : Optional[int] ,lowercase_ : Optional[Any] ,lowercase_ : Optional[int] ):
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Tuple = TFRoFormerForCausalLM(config=lowercase_ )
lowerCAmelCase__ : List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ : Dict = model(lowercase_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) ,[self.batch_size, self.seq_length, self.vocab_size] )
def __lowerCAmelCase ( self : int ,lowercase_ : Any ,lowercase_ : int ,lowercase_ : int ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Optional[int] ,lowercase_ : Any ):
lowerCAmelCase__ : Optional[Any] = TFRoFormerForMaskedLM(config=lowercase_ )
lowerCAmelCase__ : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : int ,lowercase_ : List[Any] ,lowercase_ : List[Any] ,lowercase_ : Any ,lowercase_ : str ,lowercase_ : int ,lowercase_ : List[Any] ,lowercase_ : List[str] ):
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : List[str] = TFRoFormerForSequenceClassification(config=lowercase_ )
lowerCAmelCase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase__ : str = model(lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        # TODO Replace values below with the expected slice computed from the checkpoint.
        expected_slice = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
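# --- Illustrative aside (not part of the original test file) -------------------
# The two checks above pin down the sinusoidal layout: sin terms fill the first
# half of each embedding row and cos terms the second half. A minimal NumPy
# sketch of that table under the standard 1/10000**(2i/d) frequency convention:
import numpy as np

def sinusoidal_table(num_positions: int, dim: int) -> np.ndarray:
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.arange(num_positions)[:, None] * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

# Position 0 gives sin = 0 and cos = 1, matching the first expected row above;
# position 1 starts with sin(1) ~ 0.8415, matching both expected tensors.
assert np.allclose(sinusoidal_table(6, 6)[0], [0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
assert abs(sinusoidal_table(6, 6)[1, 0] - 0.8415) < 1e-4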
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        desired_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        desired_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
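# --- Illustrative aside (not part of the original test file) -------------------
# A NumPy sketch of the rotation the test above verifies: RoFormer splits the
# sinusoidal row into sin | cos halves and rotates each even/odd channel pair
# by a position-dependent angle. The pairing convention below is an assumption
# based on the RoFormer paper, not code lifted from the TF implementation.
import numpy as np

def apply_rotary(x: np.ndarray, sinusoidal_pos: np.ndarray) -> np.ndarray:
    """x: (..., seq_len, dim); sinusoidal_pos: (seq_len, dim) laid out as sin|cos."""
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)  # each (seq_len, dim // 2)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    rotated = np.empty_like(x)
    rotated[..., 0::2] = x_even * cos - x_odd * sin  # standard 2-D rotation,
    rotated[..., 1::2] = x_even * sin + x_odd * cos  # applied channel-pair-wise
    return rotated

# Rotations preserve norms, which is what makes this safe to apply to queries
# and keys before the attention dot product.
x = np.random.default_rng(0).standard_normal((2, 16, 8))
pos = np.concatenate([np.sin(np.ones((16, 4))), np.cos(np.ones((16, 4)))], axis=-1)
assert np.allclose(np.linalg.norm(apply_rotary(x, pos)), np.linalg.norm(x))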
| 106 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "[PAD]" )
        self.assertEqual(len(vocab_keys ) , 30001 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "This is a test"
__lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DebertaVaTokenizer(_A )
__lowerCAmelCase = tokenizer.encode("sequence builders" )
__lowerCAmelCase = tokenizer.encode("multi-sequence build" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
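# --- Illustrative aside (not part of the original test file) -------------------
# The sequence-builder test above checks the classic single/pair composition:
# [CLS] ids_a [SEP] and [CLS] ids_a [SEP] ids_b [SEP]. A pure-Python sketch with
# placeholder ids (1 and 2 are stand-ins, not the real DeBERTa-v2 vocabulary):
CLS_ID, SEP_ID = 1, 2

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    out = [CLS_ID] + list(ids_a) + [SEP_ID]
    if ids_b is not None:
        out += list(ids_b) + [SEP_ID]
    return out

assert build_inputs_with_special_tokens([7, 8]) == [1, 7, 8, 2]
assert build_inputs_with_special_tokens([7, 8], [9]) == [1, 7, 8, 2, 9, 2]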
| 92 | 0 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig):
"""simple docstring"""
snake_case__ : Any = "mask2former"
snake_case__ : Any = ["swin"]
snake_case__ : Optional[Any] = {"hidden_size": "hidden_dim"}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                F"""Supported model types: {','.join(self.backbones_supported )}""" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # the decoder depth doubles as the generic num_hidden_layers attribute
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
@classmethod
    def from_backbone_config(cls , backbone_config: PretrainedConfig , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict(self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
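# --- Illustrative aside (not part of the original config file) -----------------
# A small standalone sketch of the nested-backbone pattern above: a parent
# config that accepts either a child config object or a plain dict, and that
# recurses into the child when serializing. Class and field names below are
# illustrative, not the transformers API.
import copy

class ChildConfig:
    model_type = "toy-backbone"

    def __init__(self, hidden_size=96):
        self.hidden_size = hidden_size

    def to_dict(self):
        out = dict(self.__dict__)
        out["model_type"] = self.model_type
        return out

    @classmethod
    def from_dict(cls, d):
        d = dict(d)
        d.pop("model_type", None)
        return cls(**d)

class ParentConfig:
    def __init__(self, backbone_config=None):
        if backbone_config is None:
            backbone_config = ChildConfig()  # fall back to a default backbone
        elif isinstance(backbone_config, dict):
            backbone_config = ChildConfig.from_dict(backbone_config)  # revive from a serialized dict
        self.backbone_config = backbone_config

    def to_dict(self):
        out = copy.deepcopy(self.__dict__)
        out["backbone_config"] = self.backbone_config.to_dict()  # recurse into the child
        return out

parent = ParentConfig({"model_type": "toy-backbone", "hidden_size": 128})
assert ParentConfig(parent.to_dict()["backbone_config"]).backbone_config.hidden_size == 128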
| 357 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
a__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline):
"""simple docstring"""
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ) -> None:
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing(self ) -> None:
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=1_6_0_0_0 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_0 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Dict , ) -> Any:
__SCREAMING_SNAKE_CASE = self.speech_processor.feature_extractor(
UpperCAmelCase__ , return_tensors="pt" , sampling_rate=UpperCAmelCase__ ).input_features.to(self.device )
__SCREAMING_SNAKE_CASE = self.speech_model.generate(UpperCAmelCase__ , max_length=4_8_0_0_0_0 )
__SCREAMING_SNAKE_CASE = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , normalize=UpperCAmelCase__ )[
0
]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = 1
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase__ )}.""" )
# get prompt text embeddings
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
__SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text_embeddings.shape
__SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
__SCREAMING_SNAKE_CASE = [""] * batch_size
elif type(UpperCAmelCase__ ) is not type(UpperCAmelCase__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase__ )} !="""
F""" {type(UpperCAmelCase__ )}.""" )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(UpperCAmelCase__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
__SCREAMING_SNAKE_CASE = negative_prompt
__SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
__SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device="cpu" , dtype=UpperCAmelCase__ ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__SCREAMING_SNAKE_CASE = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
__SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1 / 0.18_215 * latents
__SCREAMING_SNAKE_CASE = self.vae.decode(UpperCAmelCase__ ).sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase__ , nsfw_content_detected=UpperCAmelCase__ )
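# --- Illustrative aside (not part of the original pipeline file) ---------------
# The guidance step above, in isolation: classifier-free guidance blends the
# unconditional and text-conditioned noise predictions as
#     eps = eps_uncond + w * (eps_text - eps_uncond)
# so w = 1 recovers the conditional prediction and larger w pushes the sample
# further toward the prompt. A NumPy sketch:
import numpy as np

def classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

uncond = np.zeros((1, 4, 8, 8))
text = np.ones((1, 4, 8, 8))
assert np.allclose(classifier_free_guidance(uncond, text, 1.0), text)
assert float(classifier_free_guidance(uncond, text, 7.5).mean()) == 7.5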
| 195 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
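# --- Illustrative aside (not part of the original script) ----------------------
# The head-selection logic above as a standalone function: GLUE tasks map to a
# sequence-classification head, SQuAD-style tasks to question answering, and
# anything else falls back to the plain LM head. The return values are labels
# for illustration, not transformers classes.
from typing import Optional

def pick_head(finetuning_task: Optional[str]) -> str:
    task = finetuning_task.lower() if finetuning_task is not None else ""
    if task in {"cola", "mnli", "mrpc", "sst-2", "sts-b", "qqp", "qnli", "rte", "wnli"}:
        return "sequence-classification"
    if "squad" in task:
        return "question-answering"
    return "lm-head"

assert pick_head("MNLI") == "sequence-classification"
assert pick_head("squad_v2") == "question-answering"
assert pick_head(None) == "lm-head"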
| 207 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetVaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "mobilenet_v2"
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetVaOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
    def outputs( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1e-4
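# --- Illustrative aside (not part of the original config file) -----------------
# How depth_multiplier, depth_divisible_by and min_depth typically interact in
# MobileNet-style models: channel counts are scaled, then snapped to a multiple
# of the divisor without shrinking by more than 10%. This helper sketches that
# well-known rounding rule; it is not imported from transformers.
from typing import Optional

def make_divisible(value: float, divisor: int = 8, min_value: Optional[int] = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value

# 32 channels scaled by depth_multiplier=1.4, then snapped to a multiple of 8
assert make_divisible(32 * 1.4, divisor=8) == 48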
| 283 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE_000
SEP = 0xE_001
BOS = 0xE_002
MASK = 0xE_003
RESERVED = 0xE_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ) -> None:
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return self._unicode_vocab_size
    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return list(text )
    def _convert_token_to_id( self , token: str ) -> int:
        """simple docstring"""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'invalid token: \'{token}\'' )
    def _convert_id_to_token( self , index: int ) -> str:
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'invalid id: {index}' )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        return ()
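# --- Illustrative aside (not part of the original tokenizer file) --------------
# CANINE-style "tokenization" is just Unicode codepoints, so an encode/decode
# round trip fits in a few lines. CLS_CP/SEP_CP mirror the private-use special
# codepoints defined above; the < 0xE000 filter is a toy simplification that
# only works because the specials live in the private-use range.
CLS_CP, SEP_CP = 0xE000, 0xE001

def toy_encode(text: str) -> list:
    return [CLS_CP] + [ord(ch) for ch in text] + [SEP_CP]

def toy_decode(ids: list) -> str:
    return "".join(chr(i) for i in ids if i < 0xE000)

assert toy_decode(toy_encode("héllo")) == "héllo"
assert toy_encode("hé")[1:-1] == [104, 233]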
| 179 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
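# --- Illustrative aside (not part of the original module) ----------------------
# A condensed sketch of the lazy-import pattern above: the module advertises an
# import structure up front, and attribute access triggers the real import only
# on first use. "json"/"dumps" below are placeholders standing in for the
# albert submodules and classes.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [attrs]} into {attr: module} for O(1) lookup
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        real_module = importlib.import_module(self._attr_to_module[attr])
        return getattr(real_module, attr)

lazy = ToyLazyModule("toy", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'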
| 179 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase_ = dict(zip(__a , range(len(__a ) ) ) )
UpperCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase_ = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_hello_world_encoding(self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=True ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=True ) ,
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] ,
        )
    @slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = "Encode this sequence."
UpperCAmelCase_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
UpperCAmelCase_ = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
UpperCAmelCase_ = "Encode <mask> sequence"
UpperCAmelCase_ = "Encode <mask>sequence"
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = encoded.index(__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = encoded.index(__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def _lowercase (self : Tuple ):
pass
def _lowercase (self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = "A, <mask> AllenNLP sentence."
UpperCAmelCase_ = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
UpperCAmelCase_ = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _lowercase (self : int ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __a )
self.assertEqual(post_processor_state["add_prefix_space"] , __a )
self.assertEqual(post_processor_state["trim_offsets"] , __a )
def _lowercase (self : Tuple ):
# Verify that the returned offsets correctly reflect the arguments `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ = f"""{text_of_1_token} {text_of_1_token}"""
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
UpperCAmelCase_ = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
UpperCAmelCase_ = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
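# The offset-mapping assertions above check that each token's (start, end)
# character span shifts by one when a leading space is kept versus trimmed.
# A minimal, tokenizer-free sketch of the same bookkeeping (plain whitespace
# tokens here, not the actual fast-tokenizer output):
text = "hello hello"
offsets, pos = [], 0
for tok in text.split():
    start = text.index(tok, pos)
    offsets.append((start, start + len(tok)))
    pos = start + len(tok)
print(offsets)  # [(0, 5), (6, 11)] -> (0, len(t)) and (len(t) + 1, len(t) + 1 + len(t))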
| 1 |
'''simple docstring'''
import math
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = input("Enter message: " )
UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) )
UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " )
if mode.lower().startswith("e" ):
UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ )
elif mode.lower().startswith("d" ):
UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f"""Output:\n{text + "|"}""" )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = [""] * key
for col in range(snake_case_ ):
UpperCAmelCase_ = col
while pointer < len(snake_case_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key )
UpperCAmelCase_ = key
UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ )
UpperCAmelCase_ = [""] * num_cols
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase_ = 0
row += 1
return "".join(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
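import math

# A de-obfuscated sketch of the columnar transposition cipher above; the
# readable names (encrypt_message, decrypt_message) are assumed for clarity
# and do not appear in the sample itself.
def encrypt_message(key: int, message: str) -> str:
    # Column `col` collects every key-th character starting at offset `col`.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)

def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row at the last column, or one column early once
        # the shaded (unused) boxes in the bottom-right corner are reached.
        if col == num_cols or (col == num_cols - 1 and row >= num_rows - num_shaded_boxes):
            col = 0
            row += 1
    return "".join(plain_text)

assert decrypt_message(8, encrypt_message(8, "Common sense is not so common.")) == (
    "Common sense is not so common."
)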
| 1 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[Any] = ['pixel_values']
def __init__(self : List[str] , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BICUBIC , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : bool = True , **a__ : List[Any] , ):
"""simple docstring"""
super().__init__(**a__ )
__snake_case = size if size is not None else {'''shortest_edge''': 224}
__snake_case = get_size_dict(a__ , default_to_square=a__ )
__snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__snake_case = get_size_dict(a__ , default_to_square=a__ , param_name='''crop_size''' )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case = do_convert_rgb
def a (self : Tuple , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BICUBIC , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__snake_case = get_resize_output_image_size(a__ , size=size['''shortest_edge'''] , default_to_square=a__ )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def a (self : Optional[int] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Union[str, Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def a (self : Dict , a__ : np.ndarray , a__ : Union[int, float] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : str , ):
"""simple docstring"""
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def a (self : List[Any] , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Union[str, Any] , ):
"""simple docstring"""
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def a (self : str , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : int = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : bool = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **a__ : Union[str, Any] , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(a__ , param_name='''size''' , default_to_square=a__ )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(a__ , param_name='''crop_size''' , default_to_square=a__ )
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case = [convert_to_rgb(a__ ) for image in images]
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(a__ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
__snake_case = [to_channel_dimension_format(a__ , a__ ) for image in images]
__snake_case = {'''pixel_values''': images}
return BatchFeature(data=a__ , tensor_type=a__ )
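import numpy as np

# A numpy-only sketch of the preprocessing order the class above applies
# (resize is skipped here, the input is assumed pre-sized): center-crop to
# the default 224x224, rescale by 1/255, normalize with the OpenAI CLIP
# mean/std constants referenced above, then emit channels-first output.
image = np.random.randint(0, 256, (256, 256, 3)).astype(np.float32)

h, w = image.shape[:2]                       # center crop to 224x224
top, left = (h - 224) // 2, (w - 224) // 2
image = image[top : top + 224, left : left + 224]

image = image * (1 / 255)                    # rescale to [0, 1]

mean = np.array([0.48145466, 0.4578275, 0.40821073])   # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])   # OPENAI_CLIP_STD
image = (image - mean) / std                 # per-channel normalization

pixel_values = image.transpose(2, 0, 1)      # ChannelDimension.FIRST
print(pixel_values.shape)                    # (3, 224, 224)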
| 238 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
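import importlib

# A simplified stand-in for the _LazyModule pattern above: the heavy import
# is deferred until the attribute is first resolved. This is a hypothetical
# helper for illustration, not the transformers implementation.
class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr = module_name, attr
        self._cached = None

    def resolve(self):
        if self._cached is None:  # the import happens only on first access
            module = importlib.import_module(self._module_name)
            self._cached = getattr(module, self._attr)
        return self._cached

# `json` is not imported until resolve() is called.
lazy_loads = LazyAttr("json", "loads")
print(lazy_loads.resolve()('{"ok": true}'))  # {'ok': True}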
| 238 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCamelCase__ : Dict = TypeVar('T')
lowerCamelCase__ : Optional[Any] = TypeVar('U')
class lowerCamelCase_ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : T | None , _lowerCAmelCase : U | None ):
SCREAMING_SNAKE_CASE_ = key
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def __repr__( self : Union[str, Any] ):
return (
F"Node: key: {self.key}, val: {self.val}, "
F"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class lowerCamelCase_ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = DoubleLinkedListNode(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = DoubleLinkedListNode(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.rear, self.head
def __repr__( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = ['DoubleLinkedList']
SCREAMING_SNAKE_CASE_ = self.head
while node.next is not None:
rep.append(str(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : DoubleLinkedListNode[T, U] ):
SCREAMING_SNAKE_CASE_ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE_ = node
SCREAMING_SNAKE_CASE_ = previous
SCREAMING_SNAKE_CASE_ = node
SCREAMING_SNAKE_CASE_ = self.rear
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE_ = node.next
SCREAMING_SNAKE_CASE_ = node.prev
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
return node
class lowerCamelCase_ ( Generic[T, U] ):
'''simple docstring'''
lowercase_ = {}
def __init__( self : Any , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = DoubleLinkedList()
SCREAMING_SNAKE_CASE_ = capacity
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = {}
def __repr__( self : Optional[int] ):
return (
F"CacheInfo(hits={self.hits}, misses={self.miss}, "
F"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self : List[str] , _lowerCAmelCase : T ):
return key in self.cache
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : T ):
# Note: a pythonic interface would raise KeyError rather than return None
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE_ = self.cache[key]
SCREAMING_SNAKE_CASE_ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_lowerCAmelCase )
return node.val
self.miss += 1
return None
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : T , _lowerCAmelCase : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE_ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_lowerCAmelCase ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE_ = DoubleLinkedListNode(_lowerCAmelCase , _lowerCAmelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE_ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE_ = value
self.list.add(_lowerCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : int , _lowerCAmelCase : int = 128 ):
def cache_decorator_inner(_lowerCAmelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*_lowerCAmelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE_ = LRUCache(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE_ = func(*_lowerCAmelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , _lowerCAmelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_lowerCAmelCase , 'cache_info' , _lowerCAmelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225 |
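# A hypothetical use of the LRU decorator classmethod defined in the cache
# class above, assuming it is importable under the readable names
# LRUCache.decorator; the wrapper memoizes on the first positional argument
# and exposes a cache_info() accessor via setattr.
@LRUCache.decorator(100)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040, computed with at most 31 cache misses
print(fib.cache_info())  # hit/miss counters plus capacity and current size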
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : str = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
Mask2FormerForUniversalSegmentation,
Mask2FormerModel,
Mask2FormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 225 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCamelCase = 16
__UpperCamelCase = 32
def lowercase (SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : str = "bert-base-cased" ) -> int:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE_ : str ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=SCREAMING_SNAKE_CASE_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE_ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
# Initialize accelerator
SCREAMING_SNAKE_CASE = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE = config['lr']
SCREAMING_SNAKE_CASE = int(config['num_epochs'] )
SCREAMING_SNAKE_CASE = int(config['seed'] )
SCREAMING_SNAKE_CASE = int(config['batch_size'] )
SCREAMING_SNAKE_CASE = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
# Instantiate optimizer
SCREAMING_SNAKE_CASE = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , )
else:
SCREAMING_SNAKE_CASE = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE = 0
# Now we train the model
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc' )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = {}
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.loss
SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
SCREAMING_SNAKE_CASE = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE_ ) - 1:
SCREAMING_SNAKE_CASE = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
SCREAMING_SNAKE_CASE = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase () -> Optional[int]:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE_ , )
parser.add_argument(
'--output_dir' , type=SCREAMING_SNAKE_CASE_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 , help='Number of train epochs.' , )
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
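import numpy as np

# A toy sketch of the duplicate-trimming step in the eval loop above: a padded
# final batch makes the gathered predictions longer than the dataset, so the
# tail is cut back to the true length (all numbers here are hypothetical).
dataset_len = 10
samples_seen = 8                     # samples counted before the final batch
gathered_last_batch = np.arange(4)   # 4 gathered, but only 2 are real samples
trimmed = gathered_last_batch[: dataset_len - samples_seen]
print(trimmed)  # [0 1]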
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 | 1 |