import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).

    [`DPRContextEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece.

    Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).

    [`DPRQuestionEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece.

    Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model, sorted by descending relevance score across
        passages and descending span score within each passage.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """
        Find the best answer spans for one passage: score every (start, end) pair by the sum of its start and
        end logits, then keep the `top_spans` highest-scoring, non-overlapping spans.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).

    [`DPRReaderTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles
    and texts that are combined to be fed to the [`DPRReader`] model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
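
# Illustrative usage added here (not part of the original module; requires
# network access to download the checkpoints): the reader tokenizer combines a
# question with passage titles and texts, and `decode_best_spans` turns the
# model logits back into text spans.
if __name__ == "__main__":
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions="What is love?",
        titles="Haddaway",
        texts="'What Is Love' is a song recorded by the artist Haddaway",
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)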
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number N."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
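
# Added cross-check (not in the original file): with a window of four digits
# the greatest product in N is 9 * 9 * 8 * 9 = 5832, the value quoted in the
# Project Euler problem statement. `solution_window` generalises the window size.
def solution_window(n: str = N, window: int = 4) -> int:
    return max(
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + window]))
        for i in range(len(n) - window + 1)
    )


if __name__ == "__main__":
    assert solution_window(N, 4) == 5832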
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves needed so that every node of the binary tree
    holds exactly one coin; in one move a coin may be moved between adjacent nodes.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
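
# Worked example added for illustration: a root holding three coins with two
# empty children needs exactly two moves, one coin to each child.
if __name__ == "__main__":
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2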
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate, using the learning rate set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant learning rate multipliers parsed from a rule string such as "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 over the remaining training steps."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial learning rate down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper to build any of the schedulers above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
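
# Short usage sketch added for illustration, assuming a toy model: `get_scheduler`
# dispatches by name to the schedule functions defined above.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optimizer.step()
        lr_scheduler.step()
    print(lr_scheduler.get_last_lr())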
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
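
# Illustration added (the lookup helpers that consume this table live in
# `dependency_versions_check.py`): each entry maps a bare package name to the
# full pip requirement specifier pinned for this release.
if __name__ == "__main__":
    assert deps["numpy"] == "numpy>=1.17"
    assert deps["torch"] == "torch>=1.9,!=1.12.0"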
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory function used to build a `ConvertCommand` from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
def solution(length: int = 50) -> int:
    """
    Return the number of ways a row of the given length can be filled with red
    (length two), green (length three) or blue (length four) tiles, with colours
    not mixed and at least one coloured tile used.
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
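
# Illustration added: with the lazy module installed in `sys.modules`, the names
# registered above resolve on first attribute access, e.g. (sketch):
#
#     from transformers.models.perceiver import PerceiverConfig, PerceiverTokenizer
#
# which triggers the import of the corresponding submodule only at that point.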
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of a number. If digit_amount > 0, round to that
    decimal place; otherwise return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 283
|
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the input values: the output is 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function against the full truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 283
| 1
|
from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph given as an adjacency list.
    """
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for `n` vertices from an edge list."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 715
|
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 323
| 0
|
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
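
# Hedged usage sketch (checkpoint id assumed, not taken from this file): the
# processor bundles the tokenizer and the image processor, so a single call
# prepares both modalities for the model.
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")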
| 48
|
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by an array heap and a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
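
# Minimal sanity check for prims_algo (hypothetical graph, not part of the
# original file). The exact parent pointers depend on extraction order, so
# only the spanning-tree property (n - 1 tree edges) is asserted here.
if __name__ == "__main__":
    example_graph: GraphUndirectedWeighted[int] = GraphUndirectedWeighted()
    example_graph.add_edge(1, 2, 3)
    example_graph.add_edge(2, 3, 1)
    example_graph.add_edge(1, 3, 7)
    dist, parent = prims_algo(example_graph)
    assert sum(1 for p in parent.values() if p is not None) == len(example_graph) - 1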
| 23
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample: PILImageResampling,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
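
# Hedged usage sketch (file name assumed, not from the original source): the
# processor only resizes to a multiple of `size_divisor` and rescales pixel
# values to [0, 1], so any PIL image can be prepared like this.
#
#   from PIL import Image
#   processor = GLPNImageProcessor(size_divisor=32)
#   pixel_values = processor(images=Image.open("example.jpg"), return_tensors="pt").pixel_values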
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 446
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
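
# Hedged example: the defaults above describe a ResNet-50-style architecture,
# so a bare constructor call already yields usable stage names.
#
#   config = ResNetConfig()
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']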
| 16
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with Heun's (modified Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (forward Euler), then trapezoidal corrector
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
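
# Worked example (assumed, not from the original file): for y' = -2xy^2 with
# y(0) = 1 the exact solution is y = 1 / (1 + x^2), so the last entry of the
# returned array should approach 1 / (1 + 0.3^2) ≈ 0.9174 as step_size shrinks.
#
#   y = euler_modified(lambda x, y: -2 * x * y**2, 1.0, 0.0, 0.1, 0.3)
#   print(y[-1])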
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 177
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30_145,
        emb_dim=2_048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2_048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
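
# Hedged example: thanks to `attribute_map`, the generic configuration names
# resolve to the XLM-specific ones, e.g.
#
#   config = XLMConfig()
#   assert config.hidden_size == config.emb_dim == 2048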
| 177
| 1
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 223
|
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 565
| 0
|
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    Adapter that gates log records by process: by default only the main
    process logs, and `in_order=True` lets every process log one at a time.
    """

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
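
# Hedged usage sketch: in a script launched via `accelerate launch`, the
# adapter gates records per process, e.g.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()  # initializes the shared PartialState
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("visible once", main_process_only=True)
#   logger.info("visible on every process, in order", main_process_only=False, in_order=True)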
| 106
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 106
| 1
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 680
|
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
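
# Typical invocations once installed as the `datasets-cli` console script
# (subcommand names assumed from the imports above):
#
#   datasets-cli env                # report the environment
#   datasets-cli test ./my_dataset  # run checks against a dataset script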
| 217
| 0
|
import functools


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if `string` can be segmented into a sequence of one or more
    entries of `words`, using a trie plus memoized dynamic programming.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
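    # Illustrative checks (not part of the original file): "applepenapple"
    # segments into "apple" + "pen" + "apple", while "catsandog" cannot be
    # fully segmented with the given dictionary.
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False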
| 700
|
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 605
| 0
|
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort: split in halves, sort recursively, merge."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
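# Illustrative checks (not part of the original file): duplicates and the
# empty list are handled.
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []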
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 417
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 417
| 1
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str=13 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Any=2 , lowerCamelCase : int=3 , lowerCamelCase : Optional[int]=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : List[str]="silu" , lowerCamelCase : Any=3 , lowerCamelCase : List[Any]=32 , lowerCamelCase : int=0.1 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : int=True , lowerCamelCase : int=True , lowerCamelCase : int=10 , lowerCamelCase : str=None , ):
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = last_hidden_size
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = conv_kernel_size
__lowercase = output_stride
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = classifier_dropout_prob
__lowercase = use_labels
__lowercase = is_training
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = scope
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__lowercase = MobileViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case ( self : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = MobileViTForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__lowercase = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_snake_case : Optional[Any] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_snake_case : str = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_snake_case : Any = False
_snake_case : int = False
_snake_case : Tuple = False
_snake_case : str = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _snake_case ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any ):
__lowercase = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[Any] ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__lowercase = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits
# verify the logits
__lowercase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase )
__lowercase = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = model.to(lowerCamelCase )
__lowercase = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits.detach().cpu()
__lowercase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
__lowercase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__lowercase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__lowercase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 655
|
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits, as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
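    # Illustrative check (not part of the original file): for 2 bits the Gray
    # code visits 00, 01, 11, 10, i.e. [0, 1, 3, 2] once converted to integers.
    assert gray_code(2) == [0, 1, 3, 2]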
| 655
| 1
|
class Node:
    """Binary search tree node used by tree sort."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 198
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
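# Illustrative usage (a sketch, not part of the original file; assumes a DDPM
# checkpoint such as "google/ddpm-cifar10-32" is reachable on the Hub):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")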
| 198
| 1
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    """Sum all numbers below ``limit`` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
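# Illustrative check (not part of the original file): below 10 the numbers
# 1, 3, 5, 7 and 9 are palindromic in both base 10 and base 2; their sum is 25.
assert solution(10) == 25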
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 33
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect every undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
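# Illustrative usage (not part of the original file): the MST of a weighted
# 3-cycle drops its heaviest edge.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    print(mst.connections)  # keeps edges (1, 2) and (2, 3); (1, 3) is dropped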
| 33
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64, patch_size=1, num_channels=3, embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8,
        mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
        upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
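# Illustrative usage (not part of the original file):
#     config = Swin2SRConfig(upscale=4)
#     assert config.num_layers == len(config.depths) == 6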
| 65
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
        fft_window_size=1_024, padding_value=0.0, return_attention_mask=False,
        frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
        truncation: str = "fusion", padding: str = "repeatpad", **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase__ : Dict = spectrogram(
A ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=A ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCAmelCase__ : List[str] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCAmelCase__ : int = [0]
# randomly choose index for each part
UpperCAmelCase__ : Tuple = np.random.choice(ranges[0] )
UpperCAmelCase__ : Tuple = np.random.choice(ranges[1] )
UpperCAmelCase__ : str = np.random.choice(ranges[2] )
UpperCAmelCase__ : List[str] = mel[idx_front : idx_front + chunk_frames, :]
UpperCAmelCase__ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCAmelCase__ : Dict = mel[idx_back : idx_back + chunk_frames, :]
UpperCAmelCase__ : Optional[Any] = torch.tensor(mel[None, None, :] )
UpperCAmelCase__ : int = torch.nn.functional.interpolate(
A ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=A )
UpperCAmelCase__ : Dict = mel_shrink[0][0].numpy()
UpperCAmelCase__ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCAmelCase__ : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCAmelCase__ : str = len(A ) - max_length
UpperCAmelCase__ : Optional[Any] = np.random.randint(0 ,overflow + 1 )
UpperCAmelCase__ : Optional[int] = waveform[idx : idx + max_length]
UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(A ,self.mel_filters )
UpperCAmelCase__ : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
UpperCAmelCase__ : int = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] ,axis=0 )
UpperCAmelCase__ : Any = False
else:
UpperCAmelCase__ : Union[str, Any] = self._random_mel_fusion(A ,A ,A )
UpperCAmelCase__ : List[str] = True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented" )
else:
UpperCAmelCase__ : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCAmelCase__ : str = int(max_length / len(A ) )
UpperCAmelCase__ : int = np.stack(np.tile(A ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
UpperCAmelCase__ : List[Any] = int(max_length / len(A ) )
UpperCAmelCase__ : str = np.stack(np.tile(A ,A ) )
UpperCAmelCase__ : Optional[Any] = np.pad(A ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
UpperCAmelCase__ : int = self._np_extract_fbank_features(A ,self.mel_filters )
UpperCAmelCase__ : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
UpperCAmelCase__ : Any = self._np_extract_fbank_features(A ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : str ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : str = None ,A : Optional[str] = None ,A : Optional[int] = None ,A : Optional[int] = None ,A : Optional[Union[str, TensorType]] = None ,**A : List[str] ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation
UpperCAmelCase__ : Dict = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase__ : Optional[int] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
UpperCAmelCase__ : List[str] = is_batched_numpy or (
isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A ,np.ndarray ):
UpperCAmelCase__ : Any = np.asarray(A ,dtype=np.floataa )
elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ : Optional[Any] = [np.asarray(A )]
# convert to mel spectrogram, truncate and pad if needed.
UpperCAmelCase__ : Tuple = [
self._get_input_mel(A ,max_length if max_length else self.nb_max_samples ,A ,A )
for waveform in raw_speech
]
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Tuple = []
for mel, longer in padded_inputs:
input_mel.append(A )
is_longer.append(A )
if truncation == "fusion" and sum(A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCAmelCase__ : List[str] = np.random.randint(0 ,len(A ) )
UpperCAmelCase__ : int = True
if isinstance(input_mel[0] ,A ):
UpperCAmelCase__ : Tuple = [np.asarray(A ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
UpperCAmelCase__ : List[str] = [[longer] for longer in is_longer]
UpperCAmelCase__ : List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
UpperCAmelCase__ : str = BatchFeature(A )
if return_tensors is not None:
UpperCAmelCase__ : int = input_features.convert_to_tensors(A )
return input_features
| 65
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True, max_length=max_seq_length, padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
| 714
|
from __future__ import annotations
from random import random
class Node:
    """Treap's node: a treap is a binary tree ordered by value and a heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
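# Illustrative usage (not part of the original file):
#     root = None
#     for value in (5, 3, 9):
#         root = insert(root, value)
#     inorder(root)  # prints: 3,5,9,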
| 683
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # sort items by the chosen key, best first, and take while the budget allows
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
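# Illustrative usage (not part of the original file): greedily picking foods by
# value under a 25-unit weight budget.
#     food = ["Burger", "Pizza", "Coca Cola"]
#     value = [80, 100, 60]
#     weight = [40, 10, 20]
#     foods = build_menu(food, value, weight)
#     print(greedy(foods, 25, Things.get_value))  # ([Things(Pizza, 100, 10)], 100.0)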
| 17
| 0
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 410
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
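# These tests check that token streaming (TextStreamer printing to stdout, and
# TextIteratorStreamer consumed from a separate thread) reproduces exactly the text
# of an equivalent non-streamed greedy generation.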
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ''
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
        model = AutoModelForCausalLM.from_pretrained('distilgpt2').to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors='pt')
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ''
            for new_text in streamer:
                streamer_text += new_text
| 410
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
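# Same lazy-import layout as the other transformers subpackages: configuration symbols
# are always importable, modeling symbols only when torch is available.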
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 209
| 1
|
'''simple docstring'''
from math import sqrt
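# Collection of small number-theory helpers: trial-division primality test, sieve of
# Eratosthenes, prime factorization, gcd/lcm, Goldbach pairs, divisors, factorial and
# Fibonacci. Each function guards its contract with asserts on inputs and outputs.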
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 712
|
'''simple docstring'''
import numpy as np
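# Gabor kernel: g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi),
# where (x', y') are the pixel coordinates rotated by theta.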
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 517
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultinodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 187
|
from __future__ import annotations
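# Bitonic sort is a comparison network: it only sorts correctly when the input
# length is a power of two. direction = 1 sorts ascending, 0 sorts descending.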
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
| 187
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 515
|
def add(first: int, second: int) -> int:
    # Ripple-carry addition with bitwise ops: AND finds the carry bits, XOR adds
    # without carry, and the carry is shifted left and re-added until it vanishes.
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
| 515
| 1
|
from __future__ import annotations
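# Open knight's tour via depth-first backtracking: try every starting square, extend
# the tour through valid knight moves, and undo a move when it dead-ends.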
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63
|
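# Project Euler 97: the last n digits of the non-Mersenne prime 28433 * 2^7830457 + 1,
# computed with modular exponentiation (three-argument pow) instead of the full number.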
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 661
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 237
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
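# Stalebot: closes issues that stayed inactive after a stale notification, re-opens
# ones with fresh human activity, and posts the stale notice after 23 days of
# inactivity (issues carrying an exempt label are skipped).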
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 237
| 1
|
'''simple docstring'''
from __future__ import annotations
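# Sliding-window maximum sum: compute the first window once, then slide by subtracting
# the element leaving the window and adding the one entering it, giving O(n) instead
# of the naive O(n * k).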
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("""Invalid Input""")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 384
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
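# BartConfig stores the encoder/decoder hyper-parameters; BartOnnxConfig below
# describes the dynamic axes and dummy inputs needed to export BART to ONNX,
# including past_key_values shapes when use_past is enabled.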
class BartConfig(PretrainedConfig):
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                """The config can simply be saved and uploaded again to be fixed."""
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ]
            )
            if self.use_past:
                common_inputs["""decoder_input_ids"""] = {0: """batch"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_inputs[f'past_key_values.{i}.value'] = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_outputs[f'present.{i}.value'] = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["""input_ids"""].shape
            decoder_seq_length = common_inputs["""decoder_input_ids"""].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["""decoder_attention_mask"""] = torch.cat(
                [common_inputs["""decoder_attention_mask"""], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch
            batch, seqlen = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["""attention_mask"""].dtype
            common_inputs["""attention_mask"""] = torch.cat(
                [common_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [""" """.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 384
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    model_type = """van"""

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 242
|
class Graph:
    def __init__(self):
        # adjacency list keyed by vertex
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, """ -> """, """ -> """.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        visited[start_vertex] = True
        print(start_vertex, end=""" """)
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 242
| 1
|
import requests
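# Minimal client for the Giphy search endpoint: the query is URL-encoded by joining
# words with '+', and the JSON response's 'data' list holds the matching GIFs.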
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 55
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
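# Fast pipeline tests for DeepFloyd IF inpainting: tiny dummy components and 32x32
# inputs keep each check cheap, while GPU-only paths are guarded with skipIf.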
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
| 100
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(),
        reason="""XFormers attention is only available with CUDA and `xformers` installed""",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 422
|
"""simple docstring"""
import math
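# Expected number of distinct colours among 20 balls drawn from 70 (10 per colour):
# E = NUM_COLOURS * (1 - C(60, 20) / C(70, 20)), by linearity of expectation over
# the indicator that a given colour appears in the draw.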
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 422
| 1
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
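# Covers the legacy graph-based ONNX export: conversion of PT/TF checkpoints,
# post-export quantization size checks, dynamic-axis inference, and argument
# reordering via ensure_valid_input.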
class _snake_case :
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> List[str]:
return None
class _snake_case :
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> int:
return None
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[int] = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(a , 'tf' , 12 , **a)
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(a , 'pt' , 12 , **a)
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> int:
from transformers import BertModel
SCREAMING_SNAKE_CASE = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t') as vocab_file:
vocab_file.write('\n'.join(a))
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name)
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(a)))
model.save_pretrained(a)
self._test_export(a , 'pt' , 12 , a)
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(a , 'tf' , 12 , **a)
SCREAMING_SNAKE_CASE = quantize(Path(a))
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(a).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model')
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(a , 'pt' , 12 , **a)
SCREAMING_SNAKE_CASE = quantize(a)
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(a).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model')
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=None , **a) -> List[Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE = Path(a).joinpath('model.onnx')
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(a , a , a , a , a , **a)
return path
except Exception as e:
self.fail(a)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
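
# A minimal standalone sketch (not part of the test suite above) of how these
# helpers are typically combined; the output path and opset value here are
# illustrative assumptions, not values taken from the tests:
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert, quantize
#
#   onnx_path = Path("onnx/bert-base-cased.onnx")
#   convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)
#   quantized_path = quantize(onnx_path)  # writes a quantized sibling file next to the original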
| 73
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference (or a set of references) produced by humans.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references for each prediction. Each
        reference should be a string with tokens separated by spaces.
    rouge_types: A list of rouge types to calculate.
        Valid names:
        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
        `"rougeL"`: Longest common subsequence based scoring.
        `"rougeLsum"`: rougeLsum splits text using `"\n"`.
        See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregates if this is set to True
Returns:
    rouge1: rouge_1 (precision, recall, f1),
    rouge2: rouge_2 (precision, recall, f1),
    rougeL: rouge_l (precision, recall, f1),
    rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
    >>> rouge = datasets.load_metric('rouge')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    >>> print(results["rouge1"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results["rouge1"].mid.fmeasure)
    1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
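
# A minimal sketch of what `_compute` does under the hood, using rouge_score
# directly; the example strings are illustrative:
#
#   from rouge_score import rouge_scorer
#
#   scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
#   score = scorer.score("the cat sat on the mat", "the cat was on the mat")
#   print(score["rouge1"].fmeasure)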
| 382
| 0
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    # Shard filenames are expected to end in "-<shard>-<num_samples>.tfrecord",
    # so the per-shard sample count can be read straight off the name.
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size,
        shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
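
# Illustrative invocation (the script filename and GCS paths below are
# assumptions, not part of this file):
#
#   python run_mlm.py --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#       --bfloat16 --output_dir ./mlm_checkpoints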
| 710
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 539
| 0
|
import sys
def matrix_chain_order(array):
    """Return the cost table and split table for the matrix-chain-order DP."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization for matrices i..j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
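
# Worked example: for dimensions [30, 35, 15, 5, 10, 20, 25] the matrices are
# A1 (30x35), A2 (35x15), ..., A6 (20x25). The DP recurrence
#   m[a][b] = min over a <= c < b of m[a][c] + m[c+1][b] + p[a-1] * p[c] * p[b]
# gives m[1][6] = 15125 scalar multiplications for the optimal parenthesization
# ((A1(A2A3))((A4A5)A6)), which is exactly what main() prints.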
| 138
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` at which `pattern` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
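
# The scan above is the brute-force O(len(s) * len(pattern)) approach; the
# second call finds "ABC" at indices [4, 10, 18] in the example string.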
| 220
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
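
# A minimal usage sketch (the checkpoint name is a real hub ID; the sentence is
# illustrative). This tokenizer handles the text side of speech-to-text models:
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   print(tokenizer.decode(ids, skip_special_tokens=True))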
| 705
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint file.''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
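
# Illustrative invocation (the script filename and checkpoint path are
# assumptions; the weights come from the zoo link above):
#
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-w24 \
#       --image_size 384 --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384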
| 60
| 0
|
from collections import namedtuple
SCREAMING_SNAKE_CASE : str = namedtuple("from_to", "from_ to")
SCREAMING_SNAKE_CASE : Tuple = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00454, 264.172),
"cubicyard": from_to(0.76455, 1.30795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert a volume between the units listed in METRIC_CONVERSION.
    >>> volume_conversion(4, "cubicmeter", "litre")
    4000
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
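
# Example conversions using the table above:
#
#   volume_conversion(4, "cubicmeter", "litre")      # -> 4000
#   volume_conversion(1, "kilolitre", "cubicmeter")  # -> 1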
| 635
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
A = """
Human: <<task>>
Assistant: """
A = """huggingface-tools/default-prompts"""
A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]:
"""simple docstring"""
if prompt_or_repo_id is None:
__UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , UpperCamelCase ) is not None:
return prompt_or_repo_id
__UpperCAmelCase : str = cached_file(
UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(UpperCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
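
# A minimal usage sketch (the agent name is illustrative; passing None falls
# back to the default prompts repo defined above):
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")
#   print(template[:100])  # first chunk of the cached run prompt template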
| 77
| 0
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings for the given 1-d timesteps array."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Learns a two-layer MLP embedding for input time steps."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps get_sinusoidal_embeddings as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
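
# A minimal usage sketch (shapes are illustrative):
#
#   import jax
#
#   timesteps = jnp.arange(4)                      # shape (4,)
#   module = FlaxTimesteps(dim=32)
#   params = module.init(jax.random.PRNGKey(0), timesteps)
#   emb = module.apply(params, timesteps)          # shape (4, 32)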
| 505
|
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """
    Encode raw bytes as an uppercase base16 (hex) string.
    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Turn each byte into its two-digit uppercase hexadecimal representation,
    # then join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase base16 (hex) string back into bytes.
    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.'
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.'
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
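
# Round-trip example:
#
#   base16_encode(b"Hello")       # -> '48656C6C6F'
#   base16_decode("48656C6C6F")   # -> b'Hello'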
| 505
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely and wrapped for 80 char width.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable this branch to debug everything but the actual run itself
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
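# A quick illustration (not part of the benchmark script itself) of how the dimension
# expansion above works: each --variations argument is split on "|", and
# itertools.product builds every combination across dimensions.
#
# import itertools, re
# dims = [list(map(str.strip, re.split(r"\|", x))) for x in ["|--fp16|--bf16", "|--tf32"]]
# variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
# print(variations)  # ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']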
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this to taking a full-size model and reducing its layers and
# emb dimensions to the minimum, while keeping the full vocab + merges files, which leads to
# ~3MB in total for all files. The latter approach is done by `fsmt-make-tiny-model.py`.
#
# The result will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
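# A minimal usage sketch (assuming the model was uploaded under the id above):
#
# from transformers import FSMTForConditionalGeneration, FSMTTokenizer
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
# batch = tokenizer(["Making tiny model"], return_tensors="pt")
# outputs = model(**batch)  # forward pass only; output quality is irrelevant for a test model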
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """
    Returns the inverse of a 2x2 or 3x3 matrix, using Decimal internally to
    limit floating-point error.
    """
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
for i in range(3 ):
for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant)
# Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a single random channels-first image, converted to a PIL image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = "▁"
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCamelCase__ :int = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
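# A minimal sketch (assumed, not from the source) of a concrete subclass: the static
# `register_subcommand` hooks the command into an argparse sub-parser, and `run`
# performs the actual work. `HelloCommand` and the "hello" sub-command are hypothetical.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a CLI command")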
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
'''simple docstring'''
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be filled with
    blocks of minimum length 3, any two blocks separated by at least one empty
    square (cf. Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be tiled with
    coloured tiles of lengths 2, 3 and 4, counted per colour
    (cf. Project Euler problem 116).
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of a list of distinct numbers
    (quickselect with a random pivot).

    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    >>> kth_number([2, 1, 3], 1)
    1
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
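# A minimal usage sketch:
#
# print(kth_number([3, 2, 1, 5, 4], 2))  # -> 2 (second smallest)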
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data):
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data):
        """Pad the data to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
        """Split the data into 64-byte blocks and run the compression loop."""
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
                    s1 = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
# Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    """Hash either a string or the contents of a file with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
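# A quick sanity check against a well-known SHA-256 test vector:
#
# assert SHA256(b"abc").hash == (
#     "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
# )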
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")

        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """
    In-place circle sort: repeatedly compare and swap elements that are
    mirrored around the centre of each (sub)list until no swap happens.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Return True if any pair of elements was swapped, else False."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
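# A minimal usage sketch:
#
# print(circle_sort([0, 5, 3, 2, 2]))  # -> [0, 2, 2, 3, 5]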
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : str = ""
__SCREAMING_SNAKE_CASE : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__SCREAMING_SNAKE_CASE : str = None # compression type in fsspec. ex: "gzip"
__SCREAMING_SNAKE_CASE : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , lowercase_ = "" , lowercase_ = None , lowercase_ = None , **lowercase_ ) -> str:
super().__init__(self , **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCAmelCase = fsspec.open(
lowercase_ , mode='rb' , protocol=lowercase_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCAmelCase = os.path.basename(self.file.path.split('::' )[0] )
UpperCAmelCase = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
UpperCAmelCase = None
@classmethod
def a_ ( cls , lowercase_ ) -> Union[str, Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def a_ ( self ) -> int:
if self.dir_cache is None:
UpperCAmelCase = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
UpperCAmelCase = {f['name']: f}
def a_ ( self , lowercase_ ) -> Any:
return self.file.open().read()
def a_ ( self , lowercase_ , lowercase_ = "rb" , lowercase_=None , lowercase_=True , lowercase_=None , **lowercase_ , ) -> Optional[Any]:
UpperCAmelCase = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[Any] = "bz2"
__SCREAMING_SNAKE_CASE : Union[str, Any] = "bz2"
__SCREAMING_SNAKE_CASE : Union[str, Any] = ".bz2"
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[str] = "gzip"
__SCREAMING_SNAKE_CASE : Any = "gzip"
__SCREAMING_SNAKE_CASE : List[str] = ".gz"
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[str] = "lz4"
__SCREAMING_SNAKE_CASE : List[Any] = "lz4"
__SCREAMING_SNAKE_CASE : Tuple = ".lz4"
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[str] = "xz"
__SCREAMING_SNAKE_CASE : Union[str, Any] = "xz"
__SCREAMING_SNAKE_CASE : List[Any] = ".xz"
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[str] = "zstd"
__SCREAMING_SNAKE_CASE : int = "zstd"
__SCREAMING_SNAKE_CASE : Tuple = ".zst"
def __init__( self , lowercase_ , lowercase_ = "rb" , lowercase_ = None , lowercase_ = None , lowercase_ = DEFAULT_BLOCK_SIZE , **lowercase_ , ) -> str:
super().__init__(
fo=lowercase_ , mode=lowercase_ , target_protocol=lowercase_ , target_options=lowercase_ , block_size=lowercase_ , **lowercase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCAmelCase = self.file.__enter__
class _UpperCAmelCase :
def __init__( self , lowercase_ ) -> List[Any]:
UpperCAmelCase = file_
def __enter__( self ) -> List[str]:
self._file.__enter__()
return self
def __exit__( self , *lowercase_ , **lowercase_ ) -> List[str]:
self._file.__exit__(*lowercase_ , **lowercase_ )
def __iter__( self ) -> Optional[int]:
return iter(self._file )
def a_ ( self ) -> Tuple:
return next(self._file )
def __getattr__( self , lowercase_ ) -> Optional[int]:
return getattr(self._file , lowercase_ )
def fixed_enter(*lowercase_ , **lowercase_ ):
return WrappedFile(_enter(*lowercase_ , **lowercase_ ) )
UpperCAmelCase = fixed_enter
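# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): the classes above
# register single-file compression filesystems for fsspec. The round-trip
# below relies only on standard fsspec behaviour (`compression="gzip"`); the
# temporary-file setup is invented for illustration.
if __name__ == '__main__':
    import gzip
    import tempfile

    with tempfile.NamedTemporaryFile(suffix='.txt.gz', delete=False) as tmp:
        tmp.write(gzip.compress(b'hello world'))
        archive_path = tmp.name

    with fsspec.open(archive_path, 'rb', compression='gzip') as f:
        assert f.read() == b'hello world'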
| 183
| 0
|
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """
    Implements the hyperbolic tangent through its exponential form:
    tanh(x) = (2 / (1 + e^(-2x))) - 1
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
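    # Editor's sanity check: the closed form used above matches numpy's
    # built-in tanh to floating-point precision.
    x = np.linspace(-3.0, 3.0, 7)
    assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))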
| 438
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A = argparse.ArgumentParser(
description=(
            'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # map the teacher embeddings onto the student's (DistilBERT-style) module names
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict['cls.predictions.decoder.weight']
    compressed_sd["vocab_projector.bias"] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
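    # Editor's sketch of how the extracted checkpoint could be consumed.
    # The 6-layer student config mirrors the 6 teacher layers copied above;
    # using the DistilBert classes here is an assumption, not part of this script.
    from transformers import DistilBertConfig, DistilBertForMaskedLM

    student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    missing, unexpected = student.load_state_dict(
        torch.load(args.dump_checkpoint), strict=False
    )
    print(f"missing: {len(missing)}, unexpected: {len(unexpected)}")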
| 320
| 0
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed, weighted graph stored as an adjacency list {u: [[w, v], ...]}."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # add the directed edge u -> v with weight w (no duplicates)
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        # iterative depth-first search from s; stops early if d is found
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most ~100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """Undirected, weighted graph stored as an adjacency list {u: [[w, v], ...]}."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # add the edge u <-> v in both directions
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most ~100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
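# Editor's usage sketch for the two classes above; the 3-node example graph is
# invented for illustration.
if __name__ == '__main__':
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0
    print(g.dfs())        # [0, 1, 2]
    print(g.bfs())        # [0, 1, 2]
    print(g.has_cycle())  # True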
| 713
|
def prime_sieve_eratosthenes(num) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        raise ValueError('Input must be a positive integer')

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Union[str, Any] = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
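    # Editor's check: the first primes, as returned by the sieve above.
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]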
| 392
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    model_type = 'git_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == "git":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = 'git'

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
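# Editor's sketch: instantiating the composite config and round-tripping it
# through to_dict(); the asserted values are the defaults defined above.
if __name__ == '__main__':
    config = GitConfig()
    serialized = config.to_dict()
    assert serialized['model_type'] == 'git'
    assert serialized['vision_config']['hidden_size'] == 768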
| 158
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 318
| 0
|
from __future__ import annotations
solution = []
def is_safe(board, row, column) -> bool:
    """Returns True if no queen threatens the square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row) -> bool:
    """Backtracking search: place one queen per row, printing each full solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board) -> None:
    """Prints the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for GPT-NeoX-Japanese models (defaults follow the 2.7b checkpoint)."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
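# Editor's sketch: the config instantiates with the 2.7b defaults above, and
# any field can be overridden by keyword.
if __name__ == '__main__':
    config = GPTNeoXJapaneseConfig(num_hidden_layers=2)  # small config, e.g. for tests
    assert config.hidden_size == 2560 and config.num_hidden_layers == 2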
| 446
| 0
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 291
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 291
| 1
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # take items in descending key order while they still fit in the budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
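    # Editor's usage sketch for the greedy knapsack above; the menu data is
    # invented for illustration.
    foods = build_menu(['burger', 'salad', 'pizza'], [80, 30, 100], [40, 10, 60])
    taken, value = greedy(foods, 70, Things.get_value)
    print(taken, value)  # picks pizza then salad: total value 130 within the 70 budget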
| 700
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
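# Editor's note: a minimal stand-in showing the lazy-import pattern used above.
# The real `_LazyModule` replaces this module in `sys.modules` so that, e.g.,
# `from transformers.onnx import OnnxConfig` only imports the `config`
# submodule on first attribute access. This sketch is illustrative only.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # resolve the submodule that exports `attr`, importing it on demand
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'{self.__name__}.{submodule}')
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')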
| 162
| 0
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)

    def get_scheduler_config(self, **kwargs):
        config = {'num_train_timesteps': 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 550
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors='np')
        input_processor = processor(audio=audio, return_tensors='np')
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors='np')
        input_processor = processor(images=images, return_tensors='np')
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg='`processor` and `image_processor`+`feature_extractor` model input names do not match',
        )
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = 'vit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
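# Editor's sketch: how the ONNX config above is typically consumed. The
# `transformers.onnx.export` entry point is the real consumer; inspecting the
# declared input axes like this is illustrative.
if __name__ == '__main__':
    onnx_config = ViTOnnxConfig(ViTConfig())
    print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
    print(onnx_config.atol_for_validation)  # 1e-04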
| 703
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any]=() , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :int="no" , SCREAMING_SNAKE_CASE :Dict="29500" ) -> Dict:
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : int = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
__lowerCAmelCase : int = True
elif "IPython" in sys.modules:
__lowerCAmelCase : Optional[int] = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
__lowerCAmelCase : List[str] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , SCREAMING_SNAKE_CASE ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
__lowerCAmelCase : List[Any] = 8
__lowerCAmelCase : List[str] = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="""TPU""" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*SCREAMING_SNAKE_CASE )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variables to be set. We set the ones common to each
# process here (the others will be set by the launcher).
with patch_environment(
world_size=SCREAMING_SNAKE_CASE , master_addr="""127.0.0.1""" , master_port=SCREAMING_SNAKE_CASE , mixed_precision=SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="""MULTI_GPU""" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__lowerCAmelCase : Optional[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any]=() , SCREAMING_SNAKE_CASE :Optional[int]=2 ) -> Dict:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be set. We set the ones common to each
# process here (the others will be set by the launcher).
with patch_environment(
world_size=SCREAMING_SNAKE_CASE , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
__lowerCAmelCase : Any = PrepareForLaunch(SCREAMING_SNAKE_CASE , debug=SCREAMING_SNAKE_CASE )
start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
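# --- Illustrative sketch (not part of the original sample) ---
# The first function above backs ``accelerate.notebook_launcher``; a minimal
# notebook usage, assuming the public accelerate API:
#
#   from accelerate import notebook_launcher
#
#   def training_function():
#       ...  # build the Accelerator *inside* this function
#
#   notebook_launcher(training_function, args=(), num_processes=2)
#
# The second function is exposed upstream as ``accelerate.debug_launcher``
# and runs the target on CPU processes with a rendezvous file for debugging.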
| 240
| 0
|
"""simple docstring"""
from typing import Any
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) ->list:
"""simple docstring"""
_validation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
# Create data structures and fill in the initial step
__UpperCAmelCase : dict = {}
__UpperCAmelCase : dict = {}
for state in states_space:
__UpperCAmelCase : int = observations_space[0]
__UpperCAmelCase : List[Any] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase : Optional[int] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(UpperCAmelCase_ ) ):
__UpperCAmelCase : Tuple = observations_space[o]
__UpperCAmelCase : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase : List[Any] = ''''''
__UpperCAmelCase : Tuple = -1
for k_state in states_space:
__UpperCAmelCase : str = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase : Any = probability
__UpperCAmelCase : Optional[int] = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase : Dict = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase : str = arg_max
# The final observation
__UpperCAmelCase : Tuple = observations_space[len(UpperCAmelCase_ ) - 1]
# argmax for given final observation
__UpperCAmelCase : Union[str, Any] = ''''''
__UpperCAmelCase : Optional[int] = -1
for k_state in states_space:
__UpperCAmelCase : Union[str, Any] = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase : Any = probability
__UpperCAmelCase : Optional[Any] = k_state
__UpperCAmelCase : Optional[int] = arg_max
# Process pointers backwards
__UpperCAmelCase : int = last_state
__UpperCAmelCase : Optional[int] = []
for o in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
result.append(UpperCAmelCase_ )
__UpperCAmelCase : str = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) ->None:
"""simple docstring"""
_validate_not_empty(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
_validate_lists(UpperCAmelCase_ , UpperCAmelCase_ )
_validate_dicts(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) ->None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->None:
"""simple docstring"""
_validate_list(UpperCAmelCase_ , '''observations_space''' )
_validate_list(UpperCAmelCase_ , '''states_space''' )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->None:
"""simple docstring"""
if not isinstance(_object , UpperCAmelCase_ ):
__UpperCAmelCase : List[str] = f'''{var_name} must be a list'''
raise ValueError(UpperCAmelCase_ )
else:
for x in _object:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = f'''{var_name} must be a list of strings'''
raise ValueError(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) ->None:
"""simple docstring"""
_validate_dict(UpperCAmelCase_ , '''initial_probabilities''' , UpperCAmelCase_ )
_validate_nested_dict(UpperCAmelCase_ , '''transition_probabilities''' )
_validate_nested_dict(UpperCAmelCase_ , '''emission_probabilities''' )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->None:
"""simple docstring"""
_validate_dict(_object , UpperCAmelCase_ , UpperCAmelCase_ )
for x in _object.values():
_validate_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False ) ->None:
"""simple docstring"""
if not isinstance(_object , UpperCAmelCase_ ):
__UpperCAmelCase : int = f'''{var_name} must be a dict'''
raise ValueError(UpperCAmelCase_ )
if not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for x in _object ):
__UpperCAmelCase : Any = f'''{var_name} all keys must be strings'''
raise ValueError(UpperCAmelCase_ )
if not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for x in _object.values() ):
__UpperCAmelCase : Optional[Any] = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase : List[Any] = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(UpperCAmelCase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
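# --- Illustrative sketch (not part of the original sample) ---
# The first function above is a Viterbi decoder for a hidden Markov model.
# With the classic textbook example (inputs below are illustrative, not
# defined in this file):
#
#   observations_space = ["normal", "cold", "dizzy"]
#   states_space = ["Healthy", "Fever"]
#   initial_probabilities = {"Healthy": 0.6, "Fever": 0.4}
#   transition_probabilities = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                               "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emission_probabilities = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#                             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#
# the most likely state sequence is ["Healthy", "Healthy", "Fever"].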
| 522
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
_A : str = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
)
_A : Union[str, Any] = 'CIDAS/clipseg-rd64-refined'
_A : Tuple = 'image_segmenter'
_A : List[Any] = CLIPSegForImageSegmentation
_A : List[str] = ['image', 'text']
_A : Optional[int] = ['image']
def __init__( self : List[str] , *__lowercase : Union[str, Any] , **__lowercase : Any ):
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*__lowercase , **__lowercase )
def A_ ( self : int , __lowercase : "Image" , __lowercase : str ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=__lowercase , return_tensors='''pt''' )
def A_ ( self : List[Any] , __lowercase : List[Any] ):
'''simple docstring'''
with torch.no_grad():
__UpperCAmelCase : List[str] = self.model(**__lowercase ).logits
return logits
def A_ ( self : int , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Any = outputs.cpu().detach().numpy()
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Any = 1
return Image.fromarray((array * 255).astype(np.uint8) )
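# --- Illustrative sketch (not part of the original sample) ---
# The tool above wraps CLIPSeg: encode builds processor inputs from (image,
# label), forward returns segmentation logits, and decode thresholds the
# logits at 0 and scales to a 0/255 PIL mask. Hedged usage, assuming the
# upstream tool name for the class obfuscated above:
#
#   from PIL import Image
#   tool = ImageSegmentationTool()          # hypothetical de-obfuscated name
#   mask = tool(image=Image.open("photo.png"), label="a cat")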
| 522
| 1
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __lowercase (yaml.SafeLoader ):
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Union[str, Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
UpperCamelCase__ : str = [tuple(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else key for key in keys]
UpperCamelCase__ : int = Counter(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}')
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False):
UpperCamelCase__ : Union[str, Any] = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_)
self._check_no_duplicates_on_constructed_node(UpperCAmelCase_)
return mapping
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple[Optional[str], str]:
UpperCamelCase__ : str = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
UpperCamelCase__ : Dict = full_content[1:].index('---') + 1
UpperCamelCase__ : Any = '\n'.join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(lowerCamelCase_)
class __lowercase (__lowerCamelCase ):
# class attributes
_lowerCamelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : List[str] , UpperCAmelCase_ : Path):
with open(UpperCAmelCase_ , encoding='utf-8') as readme_file:
UpperCamelCase__ : Any = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase_)
else:
return cls()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Path):
if path.exists():
with open(UpperCAmelCase_ , encoding='utf-8') as readme_file:
UpperCamelCase__ : Tuple = readme_file.read()
else:
UpperCamelCase__ : List[Any] = None
UpperCamelCase__ : List[Any] = self._to_readme(UpperCAmelCase_)
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as readme_file:
readme_file.write(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[str] = None):
if readme_content is not None:
UpperCamelCase__ : Any = _split_yaml_from_readme(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content
else:
UpperCamelCase__ : List[str] = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : str):
UpperCamelCase__ : Any = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
UpperCamelCase__ : int = {
(key.replace('-' , '_') if key.replace('-' , '_') in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
return yaml.safe_dump(
{
(key.replace('_' , '-') if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='utf-8' , ).decode('utf-8')
lowerCAmelCase__ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
lowerCAmelCase__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
lowerCAmelCase__ = ap.parse_args()
lowerCAmelCase__ = Path(args.readme_filepath)
lowerCAmelCase__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
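# --- Illustrative sketch (not part of the original sample) ---
# The loader above reads the YAML block delimited by ``---`` markers at the
# top of a README.md, e.g. (illustrative contents):
#
#   ---
#   language:
#   - en
#   task_categories:
#   - text-classification
#   ---
#   # My dataset
#
# ``from_readme`` splits this block off, parses it with the duplicate-key-
# checking safe loader, and maps dashed YAML keys (``train-eval-index``) to
# underscored field names.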
| 703
|
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
class __lowercase :
def __init__( self : Tuple , *,
UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ):
UpperCamelCase__ : Union[str, Any] = regularization
UpperCamelCase__ : Optional[int] = gamma
if kernel == "linear":
UpperCamelCase__ : List[str] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
UpperCamelCase__ : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: default gamma = 1 / (n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}'
raise ValueError(UpperCAmelCase_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Any = observations
UpperCamelCase__ : Tuple = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_)
def to_minimize(UpperCAmelCase_ : ndarray) -> float:
UpperCamelCase__ : Union[str, Any] = 0
((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_)
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(UpperCAmelCase_)
UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0)
UpperCamelCase__ : Dict = Bounds(0 , self.regularization)
UpperCamelCase__ : Any = minimize(
UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x
UpperCamelCase__ : str = l_star
# calculating mean offset of separation plane to points
UpperCamelCase__ : Any = 0
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
UpperCamelCase__ : List[str] = s / n
def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Optional[int] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCAmelCase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
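# --- Illustrative sketch (not part of the original sample) ---
# Intended use of the classifier above (methods are obfuscated here; the
# upstream version exposes ``fit``/``predict``), on a toy linearly
# separable problem:
#
#   import numpy as np
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")              # hypothetical de-obfuscated name
#   svc.fit(xs, ys)                         # solves the Wolfe dual above
#   svc.predict(np.asarray([0.0, 1.5]))     # -> 1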
| 6
| 0
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = R"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
"""simple docstring"""
a__ : List[str] = max_length
a__ : Dict = max_position_embeddings
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
a__ : List[Any] = input_ids.shape[-1]
a__ : List[Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
"exceptions, performance degradation, or nothing at all." )
return is_done
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
"with `max_length = start_length + max_new_tokens` instead." , __UpperCAmelCase , )
a__ : List[str] = start_length
a__ : Any = max_new_tokens
a__ : Any = start_length + max_new_tokens
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None ):
"""simple docstring"""
a__ : List[Any] = max_time
a__ : Any = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return any(criteria(__UpperCAmelCase , __UpperCAmelCase ) for criteria in self )
@property
def _A ( self ):
"""simple docstring"""
for stopping_criterium in self:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return stopping_criterium.max_length
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return stopping_criterium.max_length
return None
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase ) -> StoppingCriteriaList:
a__ : str = stopping_criteria.max_length
a__ : List[Any] = deepcopy(__UpperCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , __UpperCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__UpperCamelCase ) )
return new_stopping_criteria
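# --- Illustrative sketch (not part of the original sample) ---
# These classes mirror the upstream ``transformers`` stopping criteria.
# Combining them with the public (non-obfuscated) names:
#
#   from transformers.generation import (
#       MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList)
#
#   criteria = StoppingCriteriaList([
#       MaxLengthCriteria(max_length=50),
#       MaxTimeCriteria(max_time=5.0),      # seconds
#   ])
#   # generation stops as soon as *any* criterion returns True:
#   model.generate(input_ids, stopping_criteria=criteria)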
| 191
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> List[Any]:
a__ : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Optional[Any]:
a__ , a__ : Optional[int] = emb.weight.shape
a__ : Union[str, Any] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
a__ : str = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase=None ) -> Optional[int]:
a__ : Union[str, Any] = {}
for old_key in state_dict.keys():
a__ : List[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
a__ : List[str] = key.replace("moe_layer.experts.0" , F'ffn.experts.expert_{expert_idx}' )
else:
a__ : Optional[Any] = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
a__ : Optional[Any] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
a__ : int = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
a__ : Any = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
a__ : str = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
a__ : Optional[int] = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
a__ : int = key.replace("final_layer_norm" , "ff_layer_norm" )
a__ : Optional[int] = state_dict[old_key]
return new_dict
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = WEIGHTS_NAME ) -> Dict:
a__ : Any = []
a__ : List[str] = 0
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
for expert in range(__UpperCamelCase ):
a__ : Union[str, Any] = switch_checkpoint_path + F'-rank-{expert}.pt'
if os.path.isfile(__UpperCamelCase ):
a__ : str = torch.load(__UpperCamelCase )["model"]
remove_ignore_keys_(__UpperCamelCase )
a__ : Tuple = rename_fairseq_keys(__UpperCamelCase , __UpperCamelCase )
a__ : str = os.path.join(
__UpperCamelCase , weights_name.replace(".bin" , F'-{len(__UpperCamelCase )+1:05d}-of-???.bin' ) )
torch.save(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__UpperCamelCase )[0]].dtype )
# Add the last block
a__ : int = os.path.join(__UpperCamelCase , weights_name.replace(".bin" , F'-{len(__UpperCamelCase )+1:05d}-of-???.bin' ) )
a__ : Any = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(__UpperCamelCase )
a__ : Optional[Any] = rename_fairseq_keys(__UpperCamelCase , __UpperCamelCase )
a__ : List[Any] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved in the same file)
if len(__UpperCamelCase ) == 1:
a__ : Union[str, Any] = os.path.join(__UpperCamelCase , __UpperCamelCase )
torch.save(__UpperCamelCase , __UpperCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__UpperCamelCase , __UpperCamelCase )
# Otherwise, let's build the index
a__ : Any = {}
for idx, shard in enumerate(__UpperCamelCase ):
a__ : Any = weights_name.replace(".bin" , F'-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin' )
a__ : Optional[Any] = os.path.join(__UpperCamelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
for key in shard:
a__ : Any = shard_file
# Add the metadata
a__ : Optional[int] = {"total_size": total_size}
a__ : Optional[Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , "w" , encoding="utf-8" ) as f:
a__ : Any = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + "\n"
f.write(__UpperCamelCase )
return metadata, index
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCamelCase = parser.parse_args()
lowerCamelCase , lowerCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
lowerCamelCase = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
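# --- Illustrative sketch (not part of the original sample) ---
# ``shard_on_the_fly`` writes one ``.bin`` file per expert rank plus a
# shared shard, then an index JSON of the form (contents illustrative):
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "encoder.layers.0.ffn.experts.expert_0.fc1.weight":
#           "pytorch_model-00001-of-00129.bin",
#       ...
#     }
#   }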
| 191
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase ( __A : Optional[int] , __A : Tuple , __A : Any , __A : List[Any] , __A : Optional[int] ) -> Tuple:
'''simple docstring'''
with open(__A ) as metadata_file:
snake_case : Union[str, Any] = json.load(__A )
snake_case : Any = LukeConfig(use_entity_aware_attention=__A , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
snake_case : Any = torch.load(__A , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
snake_case : Dict = load_original_entity_vocab(__A )
# add an entry for [MASK2]
snake_case : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case : List[str] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case : Any = AddedToken("""<ent>""" , lstrip=__A , rstrip=__A )
snake_case : str = AddedToken("""<ent2>""" , lstrip=__A , rstrip=__A )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , """tokenizer_config.json""" ) , """r""" ) as f:
snake_case : int = json.load(__A )
snake_case : Any = """MLukeTokenizer"""
with open(os.path.join(__A , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__A , __A )
snake_case : Any = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
snake_case : List[str] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
snake_case : Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
snake_case : str = word_emb[ent_init_index].unsqueeze(0 )
snake_case : Dict = word_emb[enta_init_index].unsqueeze(0 )
snake_case : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case : Any = state_dict[bias_name]
snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
snake_case : List[str] = state_dict[prefix + matrix_name]
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case : List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
snake_case : Dict = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
snake_case : Optional[int] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case : Dict = state_dict["""entity_predictions.bias"""]
snake_case : Dict = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
snake_case : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case : Optional[int] = LukeForMaskedLM(config=__A ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
snake_case : Dict = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
snake_case : List[Any] = state_dict[key]
else:
snake_case : str = state_dict[key]
snake_case : int = model.load_state_dict(__A , strict=__A )
if set(__A ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case : str = MLukeTokenizer.from_pretrained(__A , task="""entity_classification""" )
snake_case : Optional[int] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
snake_case : Dict = (0, 9)
snake_case : Optional[Any] = tokenizer(__A , entity_spans=[span] , return_tensors="""pt""" )
snake_case : Any = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : Any = torch.Size((1, 33, 768) )
snake_case : List[str] = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : List[str] = torch.Size((1, 1, 768) )
snake_case : Optional[Any] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case : List[Any] = MLukeTokenizer.from_pretrained(__A )
snake_case : List[str] = """Tokyo is the capital of <mask>."""
snake_case : Tuple = (24, 30)
snake_case : Optional[Any] = tokenizer(__A , entity_spans=[span] , return_tensors="""pt""" )
snake_case : Any = model(**__A )
snake_case : str = encoding["""input_ids"""][0].tolist()
snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
snake_case : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
snake_case : int = outputs.entity_logits[0][0].argmax().item()
snake_case : List[str] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__A ) )
model.save_pretrained(__A )
def lowercase ( __A : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
snake_case : Optional[int] = [json.loads(__A ) for line in open(__A )]
snake_case : Union[str, Any] = {}
for entry in data:
snake_case : str = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
snake_case : Optional[int] = entity_id
break
snake_case : List[Any] = f"""{language}:{entity_name}"""
snake_case : List[Any] = entity_id
return new_mapping
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowercase : List[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
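# --- Illustrative sketch (not part of the original sample) ---
# Typical invocation of this conversion script (file name illustrative):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base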
| 718
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : Optional[Any] = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class _A ( snake_case ):
'''simple docstring'''
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Optional[Any] = max_length
snake_case : List[Any] = max_position_embeddings
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = input_ids.shape[-1]
snake_case : List[Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"""with `max_length = start_length + max_new_tokens` instead.""" ,SCREAMING_SNAKE_CASE_ ,)
snake_case : Tuple = start_length
snake_case : List[str] = max_new_tokens
snake_case : Optional[Any] = start_length + max_new_tokens
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class _A ( snake_case ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : List[str] = max_time
snake_case : int = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class _A ( snake_case ):
'''simple docstring'''
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return any(criteria(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for criteria in self )
@property
def snake_case_ ( self ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
return stopping_criterium.max_length
elif isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
return stopping_criterium.max_length
return None
def lowercase ( __A : StoppingCriteriaList , __A : int ) -> StoppingCriteriaList:
'''simple docstring'''
snake_case : List[Any] = stopping_criteria.max_length
snake_case : List[str] = deepcopy(__A )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , __A )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__A ) )
return new_stopping_criteria
| 315
| 0
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__magic_name__ : Dict =logging.get_logger(__name__)
class UpperCamelCase_ :
"""simple docstring"""
UpperCAmelCase__ : str = None
@experimental
def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return _map_with_joblib(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
__magic_name__ = num_proc if num_proc <= len(lowerCamelCase_ ) else len(lowerCamelCase_ )
__magic_name__ = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowerCamelCase_ ):
__magic_name__ = len(lowerCamelCase_ ) // num_proc
__magic_name__ = len(lowerCamelCase_ ) % num_proc
__magic_name__ = div * index + min(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowerCamelCase_ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'Error dividing inputs iterable among processes. '
F'Total number of objects {len(lowerCamelCase_ )}, '
F'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
F'Spawning {num_proc} processes for {len(lowerCamelCase_ )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
__magic_name__ , __magic_name__ = None, None
if not disable_tqdm:
__magic_name__ , __magic_name__ = (RLock(),), tqdm.set_lock
with Pool(lowerCamelCase_ , initargs=lowerCamelCase_ , initializer=lowerCamelCase_ ) as pool:
__magic_name__ = pool.map(lowerCamelCase_ , lowerCamelCase_ )
logger.info(F'Finished {num_proc} processes' )
__magic_name__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'Unpacked {len(lowerCamelCase_ )} objects' )
return mapped
def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ):
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowerCamelCase_ ):
return joblib.Parallel()(
joblib.delayed(lowerCamelCase_ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def __snake_case ( lowerCamelCase_ : str ):
'''simple docstring'''
__magic_name__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
__magic_name__ = None
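# --- Illustrative sketch (not part of the original sample) ---
# The contiguous split above gives the first ``mod`` processes one extra
# item, where ``div = len(iterable) // num_proc`` and
# ``mod = len(iterable) % num_proc``. E.g. 10 items over 3 processes:
#
#   div, mod = 3, 1
#   index 0: start = 0, end = 4    -> 4 items
#   index 1: start = 4, end = 7    -> 3 items
#   index 2: start = 7, end = 10   -> 3 items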
| 664
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> str:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self : List[Any] ) -> str:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(_lowerCamelCase ) # fails here
def __A ( self : List[Any] ) -> int:
__magic_name__ = [[1, 2, 3], [1, 2, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
__magic_name__ = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self : Any ) -> Union[str, Any]:
__magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
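# --- Illustrative sketch (not part of the original sample) ---
# In generation, a DisjunctiveConstraint forces *one of* several token
# sequences to appear in the output. Hedged usage with the API imported at
# the top of this file:
#
#   from transformers.generation import DisjunctiveConstraint
#
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)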
| 664
| 1
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=32 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=[10, 20, 30, 40] , _lowerCAmelCase=[1, 1, 2, 1] , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=3 , _lowerCAmelCase=None , ) -> List[str]:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embeddings_size
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_act
_lowerCAmelCase = num_labels
_lowerCAmelCase = scope
_lowerCAmelCase = len(_lowerCAmelCase )
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = TFRegNetModel(config=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , training=_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFRegNetForImageClassification(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : Optional[Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__lowerCamelCase : Dict = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : Dict = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = False
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = TFRegNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def _snake_case ( self ) -> Tuple:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _snake_case ( self ) -> int:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _snake_case ( self ) -> Union[str, Any]:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
_lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase = layer_type
_lowerCAmelCase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase={} ):
_lowerCAmelCase = model(_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()
def recursive_check(_lowerCAmelCase , _lowerCAmelCase ):
if isinstance(_lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCAmelCase , _lowerCAmelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"output_hidden_states": True} )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"output_hidden_states": True} )
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
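# Minimal standalone sketch of the inference flow the slow test above
# exercises. Assumption: TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to
# the "facebook/regnet-y-040" checkpoint; the name is illustrative, not
# asserted by this file.
if __name__ == "__main__":
    from PIL import Image as PILImage

    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    image = PILImage.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = image_processor(images=image, return_tensors="tf")
    logits = model(**inputs, training=False).logits  # shape: (1, 1000)
    print(logits[0, :3])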
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
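# Illustrative check of the helper above (token ids are made up): the decoder
# start id is prepended and -100 label padding becomes the pad id.
if __name__ == "__main__":
    demo = shift_tokens_right(jnp.array([[5, 6, -100, -100]]), pad_token_id=1, decoder_start_token_id=0)
    print(demo)  # [[0 5 6 1]]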
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[Any] = ['input_features', 'is_longer']
def __init__( self:List[Any] , _a:Dict=64 , _a:List[str]=4_80_00 , _a:str=4_80 , _a:Tuple=10 , _a:Dict=10_24 , _a:Any=0.0 , _a:List[Any]=False , _a:float = 0 , _a:float = 1_40_00 , _a:int = None , _a:str = "fusion" , _a:str = "repeatpad" , **_a:str , ):
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , return_attention_mask=_a , **_a , )
snake_case__ = top_db
snake_case__ = truncation
snake_case__ = padding
snake_case__ = fft_window_size
snake_case__ = (fft_window_size >> 1) + 1
snake_case__ = hop_length
snake_case__ = max_length_s
snake_case__ = max_length_s * sampling_rate
snake_case__ = sampling_rate
snake_case__ = frequency_min
snake_case__ = frequency_max
snake_case__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_a , min_frequency=_a , max_frequency=_a , sampling_rate=_a , norm=_a , mel_scale='''htk''' , )
snake_case__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_a , min_frequency=_a , max_frequency=_a , sampling_rate=_a , norm='''slaney''' , mel_scale='''slaney''' , )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE__ ( self:int , _a:np.array , _a:Optional[np.array] = None ):
snake_case__ = spectrogram(
_a , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_a , log_mel='''dB''' , )
return log_mel_spectrogram.T
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:List[str] , _a:Dict , _a:int ):
snake_case__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case__ = [0]
# randomly choose index for each part
snake_case__ = np.random.choice(ranges[0] )
snake_case__ = np.random.choice(ranges[1] )
snake_case__ = np.random.choice(ranges[2] )
snake_case__ = mel[idx_front : idx_front + chunk_frames, :]
snake_case__ = mel[idx_middle : idx_middle + chunk_frames, :]
snake_case__ = mel[idx_back : idx_back + chunk_frames, :]
snake_case__ = torch.tensor(mel[None, None, :] )
snake_case__ = torch.nn.functional.interpolate(
_a , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_a )
snake_case__ = mel_shrink[0][0].numpy()
snake_case__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:np.array , _a:Dict , _a:List[str] , _a:Union[str, Any] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
snake_case__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
snake_case__ = len(_a ) - max_length
snake_case__ = np.random.randint(0 , overflow + 1 )
snake_case__ = waveform[idx : idx + max_length]
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters )
snake_case__ = max_length // self.hop_length + 1  # the +1 relates to how the spectrogram is computed
snake_case__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
snake_case__ = np.stack([mel, mel, mel, mel] , axis=0 )
snake_case__ = False
else:
snake_case__ = self._random_mel_fusion(_a , _a , _a )
snake_case__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
snake_case__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
snake_case__ = int(max_length / len(_a ) )
snake_case__ = np.stack(np.tile(_a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
snake_case__ = int(max_length / len(_a ) )
snake_case__ = np.stack(np.tile(_a , _a ) )
snake_case__ = np.pad(_a , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters )
snake_case__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self:Union[str, Any] , _a:Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _a:str = None , _a:Optional[str] = None , _a:Optional[int] = None , _a:Optional[int] = None , _a:Optional[Union[str, TensorType]] = None , **_a:int , ):
snake_case__ = truncation if truncation is not None else self.truncation
snake_case__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
snake_case__ = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
snake_case__ = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case__ = [np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
snake_case__ = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case__ = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
snake_case__ = [
self._get_input_mel(_a , max_length if max_length else self.nb_max_samples , _a , _a )
for waveform in raw_speech
]
snake_case__ = []
snake_case__ = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
snake_case__ = np.random.randint(0 , len(_a ) )
snake_case__ = True
if isinstance(input_mel[0] , _a ):
snake_case__ = [np.asarray(_a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
snake_case__ = [[longer] for longer in is_longer]
snake_case__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
snake_case__ = BatchFeature(_a )
if return_tensors is not None:
snake_case__ = input_features.convert_to_tensors(_a )
return input_features
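# Toy illustration (not part of the extractor) of the "repeatpad" strategy used
# above: tile a too-short waveform, then zero-pad the remainder to max_length.
if __name__ == "__main__":
    wave = np.arange(5, dtype=np.float32)
    max_length = 12
    n_repeat = int(max_length / len(wave))  # 2
    padded = np.tile(wave, n_repeat)        # length 10
    padded = np.pad(padded, (0, max_length - padded.shape[0]), mode="constant")
    print(padded)  # [0. 1. 2. 3. 4. 0. 1. 2. 3. 4. 0. 0.]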
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # `token_ids` must be a list of nested lists of integers; tensors are rejected on init.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch must not be a strict prefix of another, otherwise "fulfilled"
        # becomes ambiguous while the longer branch is still in progress.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
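# The update() contract exercised above, condensed into one loop: feed tokens
# until one disjunctive branch is fully matched. (Illustrative only.)
if is_torch_available():
    _dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for _token in (1, 2, 4):
        _stepped, _completed, _reset = _dc.update(_token)
    assert _dc.completed and _dc.current_seq == [1, 2, 4]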
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("""Could not find root""") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest end value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the smallest end value that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
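# Example run for the O(n log n) implementation above: the longest increasing
# subsequence of [10, 9, 2, 5, 3, 7, 101, 18] is e.g. [2, 3, 7, 101].
assert longest_increasing_subsequence_length([10, 9, 2, 5, 3, 7, 101, 18]) == 4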
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __UpperCamelCase ( _a ):
'''simple docstring'''
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _UpperCAmelCase ( self ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__: Optional[Any] = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _UpperCAmelCase ( self ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Dict = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _UpperCAmelCase ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCAmelCase__: List[str] = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[str] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: str = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _UpperCAmelCase ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCAmelCase__: Union[str, Any] = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Any = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: int = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _UpperCAmelCase ( self ):
import PIL.Image
UpperCAmelCase__: int = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=lowerCamelCase__ ) as mock_cast_to_python_objects:
UpperCAmelCase__: Tuple = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , lowerCamelCase__ )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
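# Standalone round-trip sketch matching the helper above: write two rows to an
# in-memory Arrow stream, then validate them with _check_output. (Illustrative
# helper, not part of the original test suite.)
def _example_round_trip():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2 and num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=1)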
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[str] = pa.BufferOutputStream()
UpperCAmelCase__: Tuple = pa.schema(SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE ,schema=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
UpperCAmelCase__ , UpperCAmelCase__: List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase__: Optional[int] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _A ( ):
UpperCAmelCase__: Tuple = pa.BufferOutputStream()
UpperCAmelCase__: Any = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=SCREAMING_SNAKE_CASE ,features=SCREAMING_SNAKE_CASE ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
UpperCAmelCase__ , UpperCAmelCase__: List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
UpperCAmelCase__: Dict = pa.BufferReader(output.getvalue() )
UpperCAmelCase__: Any = pa.ipc.open_stream(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: pa.Table = f.read_all()
UpperCAmelCase__: Union[str, Any] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 1_0] )
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ,hash_salt="split_name" ,check_duplicates=SCREAMING_SNAKE_CASE ,) as writer:
with pytest.raises(SCREAMING_SNAKE_CASE ):
writer.write({"col_1": "foo", "col_2": 1} ,key=[1, 2] )
UpperCAmelCase__ , UpperCAmelCase__: Any = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 1_0] )
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: int = pa.BufferOutputStream()
with ArrowWriter(
stream=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ,hash_salt="split_name" ,check_duplicates=SCREAMING_SNAKE_CASE ,) as writer:
with pytest.raises(SCREAMING_SNAKE_CASE ):
writer.write({"col_1": "foo", "col_2": 1} ,key=1_0 )
writer.write({"col_1": "bar", "col_2": 2} ,key=1_0 )
UpperCAmelCase__ , UpperCAmelCase__: str = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 1_0] )
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Dict = pa.BufferOutputStream()
with ArrowWriter(
stream=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ,hash_salt="split_name" ,check_duplicates=SCREAMING_SNAKE_CASE ,) as writer:
writer.write({"col_1": "foo", "col_2": 1} ,key=1 )
writer.write({"col_1": "bar", "col_2": 2} ,key=2 )
UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: int = pa.BufferOutputStream()
UpperCAmelCase__: Optional[Any] = pa.schema(SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE ,schema=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
UpperCAmelCase__ , UpperCAmelCase__: int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase__: Optional[Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Tuple = pa.BufferOutputStream()
UpperCAmelCase__: List[str] = pa.schema(SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE ,schema=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
UpperCAmelCase__ , UpperCAmelCase__: Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase__: str = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 1_0] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[str] = pa.BufferOutputStream()
UpperCAmelCase__: List[Any] = pa.schema(SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE ,schema=SCREAMING_SNAKE_CASE ,writer_batch_size=SCREAMING_SNAKE_CASE ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
UpperCAmelCase__ , UpperCAmelCase__: Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase__: Dict = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _A ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__: List[str] = {"col_1": pa.string(), "col_2": pa.intaa()}
UpperCAmelCase__: str = os.path.join(SCREAMING_SNAKE_CASE ,"test.arrow" )
with ArrowWriter(path=SCREAMING_SNAKE_CASE ,schema=pa.schema(SCREAMING_SNAKE_CASE ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE ,metadata=writer._schema.metadata )
_check_output(SCREAMING_SNAKE_CASE ,1 )
def _A ( SCREAMING_SNAKE_CASE ):
if pa.types.is_list(SCREAMING_SNAKE_CASE ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
if isinstance(lst[0] ,SCREAMING_SNAKE_CASE ):
change_first_primitive_element_in_list(lst[0] ,SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase__: Dict = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" ,[(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: str = pa.array(TypedSequence(SCREAMING_SNAKE_CASE ,optimized_int_type=SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" ,[
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] ,)
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
# in range
UpperCAmelCase__: List[Any] = pa.array(OptimizedTypedSequence(SCREAMING_SNAKE_CASE ,col=SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
UpperCAmelCase__: Dict = copy.deepcopy(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: List[Any] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Tuple = pa.array(OptimizedTypedSequence(SCREAMING_SNAKE_CASE ,col=SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" ,[False, True] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[Any] = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=SCREAMING_SNAKE_CASE ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[Any] = "mock://dataset-train.arrow"
with ArrowWriter(path=SCREAMING_SNAKE_CASE ,storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs ,type(SCREAMING_SNAKE_CASE ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
UpperCAmelCase__ , UpperCAmelCase__: List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(SCREAMING_SNAKE_CASE )
def _A ( ):
UpperCAmelCase__: Dict = pa.BufferOutputStream()
with ParquetWriter(stream=SCREAMING_SNAKE_CASE ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
UpperCAmelCase__ , UpperCAmelCase__: str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
UpperCAmelCase__: str = pa.BufferReader(output.getvalue() )
UpperCAmelCase__: pa.Table = pq.read_table(SCREAMING_SNAKE_CASE )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" ,[False, True] )
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
import PIL.Image
UpperCAmelCase__: List[str] = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uinta ) ).save(SCREAMING_SNAKE_CASE ,format="png" )
UpperCAmelCase__: List[Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=SCREAMING_SNAKE_CASE ,features=Features({"image": Image()} ) ,embed_local_files=SCREAMING_SNAKE_CASE ) as writer:
writer.write({"image": image_path} )
writer.finalize()
UpperCAmelCase__: Optional[int] = pa.BufferReader(output.getvalue() )
UpperCAmelCase__: pa.Table = pq.read_table(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Dict = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] ,SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE ,"rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _A ( ):
UpperCAmelCase__: List[str] = pa.schema([pa.field("col_1" ,pa.string() ,nullable=SCREAMING_SNAKE_CASE )] )
UpperCAmelCase__: Optional[Any] = pa.BufferOutputStream()
with ArrowWriter(stream=SCREAMING_SNAKE_CASE ) as writer:
writer._build_writer(inferred_schema=SCREAMING_SNAKE_CASE )
assert writer._schema == pa.schema([pa.field("col_1" ,pa.string() )] )
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = tempfile.mkdtemp()
__a : Any = BlipImageProcessor()
__a : List[Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
__a : str = BlipProcessor(__UpperCamelCase , __UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).tokenizer
def __lowerCamelCase ( self , **__UpperCamelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor
def __lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : Dict = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__a : str = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__a : Any = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : Optional[Any] = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : List[Any] = self.prepare_image_inputs()
__a : List[str] = image_processor(__UpperCamelCase , return_tensors="""np""" )
__a : Optional[int] = processor(images=__UpperCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_image_processor()
__a : Tuple = self.get_tokenizer()
__a : Tuple = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Optional[Any] = """lower newer"""
__a : Optional[int] = processor(text=__UpperCamelCase )
__a : List[str] = tokenizer(__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = self.get_image_processor()
__a : List[str] = self.get_tokenizer()
__a : List[str] = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Tuple = """lower newer"""
__a : Any = self.prepare_image_inputs()
__a : Optional[int] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : str = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : List[str] = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Dict = processor.batch_decode(__UpperCamelCase )
__a : Optional[int] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : str = BlipProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__a : Dict = """lower newer"""
__a : List[str] = self.prepare_image_inputs()
__a : List[Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
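# The same processor pattern outside the test harness (tiny components, so no
# real checkpoint is needed); mirrors what the tests above assert.
def _example_processor_call():
    processor = BlipProcessor(
        tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
        image_processor=BlipImageProcessor(),
    )
    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image, return_tensors="np")
    return list(inputs.keys())  # ['pixel_values', 'input_ids', 'attention_mask']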
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
return tk_t, lg_t
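# Toy illustration of the padding performed by batch_sequences above: every
# sequence is right-padded with the pad id up to the batch maximum length.
# (Values and pad id are made up for illustration.)
def _example_pad_batch(pad_idx=0):
    batch = [(np.array([5, 6, 7]), 3), (np.array([5, 6]), 2)]  # (token_ids, length)
    token_ids = [t[0] for t in batch]
    lengths = [t[1] for t in batch]
    max_seq_len_ = max(lengths)
    tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
    return torch.tensor(tk_), torch.tensor(lengths)  # (bs, max_seq_len_) and (bs,)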
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
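# Illustrative usage (not part of the upstream module): align the template with
# a dataset's features so the label schema picks up the dataset's class names.
def _example_align():
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    task = ImageClassification().align_with_features(features)
    return task.column_mapping  # {'image': 'image', 'labels': 'labels'}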
"""simple docstring"""
__UpperCAmelCase = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__UpperCAmelCase = frozenset(['prompt', 'negative_prompt'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(['image'])
__UpperCAmelCase = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['image'])
__UpperCAmelCase = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt'])
__UpperCAmelCase = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
__UpperCAmelCase = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['image', 'mask_image'])
__UpperCAmelCase = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image'])
__UpperCAmelCase = frozenset(['class_labels'])
__UpperCAmelCase = frozenset(['class_labels'])
__UpperCAmelCase = frozenset(['batch_size'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(['batch_size'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__UpperCAmelCase = frozenset(['prompt', 'negative_prompt'])
__UpperCAmelCase = frozenset(['input_tokens'])
__UpperCAmelCase = frozenset(['input_tokens'])
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __magic_name__):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=768 ):
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = proj_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PaintByExampleMapper(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.LayerNorm(config.hidden_size )
SCREAMING_SNAKE_CASE_ : Tuple = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
SCREAMING_SNAKE_CASE_ : str = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model(pixel_values=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = clip_output.pooler_output
SCREAMING_SNAKE_CASE_ : List[str] = self.mapper(latent_states[:, None] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.final_layer_norm(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = self.proj_out(_SCREAMING_SNAKE_CASE )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _A ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : Dict = (config.num_hidden_layers + 1) // 5
SCREAMING_SNAKE_CASE_ : str = config.hidden_size
SCREAMING_SNAKE_CASE_ : Dict = 1
SCREAMING_SNAKE_CASE_ : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , activation_fn='gelu' , attention_bias=_SCREAMING_SNAKE_CASE )
for _ in range(_SCREAMING_SNAKE_CASE )
] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for block in self.blocks:
SCREAMING_SNAKE_CASE_ : Any = block(_SCREAMING_SNAKE_CASE )
return hidden_states
def sum_digits(num: int) -> int:
    """Returns the sum of the base-10 digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'{solution() = }')
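# Sanity check, assuming this is Project Euler #65 (digit sum of the numerator
# of the n-th convergent of the continued fraction of e): the 10th convergent
# is 1457/536, whose numerator has digit sum 17.
assert solution(10) == sum_digits(1457) == 17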
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    '''Creates train/eval `DataLoader`s for GLUE MRPC with the bert-base-cased tokenizer.'''
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    '''Main training loop; `config` holds the hyper-parameters sampled in main().'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a__ ) == "1":
_a : List[Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
_a : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
_a : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Optional[Any] = config['lr']
_a : Tuple = int(config['num_epochs'] )
_a : Tuple = int(config['seed'] )
_a : Tuple = int(config['batch_size'] )
set_seed(a__ )
_a , _a : Tuple = get_dataloaders(a__ , a__ )
_a : List[str] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_a : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
_a : str = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Dict = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
_a : str = AdamW(params=model.parameters() , lr=a__ )
# Instantiate scheduler
_a : Dict = get_linear_schedule_with_warmup(
optimizer=a__ , num_warmup_steps=1_0_0 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : List[Any] = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
_a : Union[str, Any] = os.path.split(a__ )[-1].split('.' )[0]
accelerator.init_trackers(a__ , a__ )
# Now we train the model
for epoch in range(a__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
_a : str = 0
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : str = model(**a__ )
_a : List[Any] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
_a : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(a__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
_a : str = model(**a__ )
_a : Tuple = outputs.logits.argmax(dim=-1 )
_a , _a : int = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a__ , references=a__ , )
_a : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , a__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(a__ ),
'epoch': epoch,
} , step=a__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    '''Parses CLI arguments and launches the training function.'''
_a : Dict = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a__ , default=a__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=str , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
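# Isolated sketch of the tracking surface used above: construct with log_with,
# initialize trackers, log scalars per step, then close them. All values below
# are dummies for illustration.
def _tracking_api_sketch():
    accelerator = Accelerator(log_with='all', project_dir='logs')
    accelerator.init_trackers('mrpc_example', config={'lr': 2E-5, 'num_epochs': 3})
    accelerator.log({'train_loss': 0.42, 'accuracy': 0.85}, step=0)
    accelerator.end_training()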
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__UpperCamelCase = TypeVar('''T''')
class lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = len(__A )
SCREAMING_SNAKE_CASE = [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE = fnc
self.build()
def __A ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
p += self.N
SCREAMING_SNAKE_CASE = v
while p > 1:
SCREAMING_SNAKE_CASE = p // 2
SCREAMING_SNAKE_CASE = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = l + self.N, r + self.N
SCREAMING_SNAKE_CASE = None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE = self.st[l] if res is None else self.fn(__A , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE = self.st[r] if res is None else self.fn(__A , self.st[r] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__UpperCamelCase = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__UpperCamelCase = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__UpperCamelCase = SegmentTree(test_array, min)
__UpperCamelCase = SegmentTree(test_array, max)
__UpperCamelCase = SegmentTree(test_array, lambda a, b: a + b)
def lowercase () -> List[Any]:
for i in range(len(UpperCamelCase__ ) ):
for j in range(UpperCamelCase__ , len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE = reduce(UpperCamelCase__ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE = reduce(UpperCamelCase__ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE = reduce(lambda SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(UpperCamelCase__ , UpperCamelCase__ )
assert max_range == max_segment_tree.query(UpperCamelCase__ , UpperCamelCase__ )
assert sum_range == sum_segment_tree.query(UpperCamelCase__ , UpperCamelCase__ )
test_all_segments()
for index, value in test_updates.items():
__UpperCamelCase = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
"""simple docstring"""
from math import sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
SCREAMING_SNAKE_CASE = 0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE_ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE_ ):
total += i
return total - n
def lowercase (SCREAMING_SNAKE_CASE_ : int = 1_00_00 ) -> int:
SCREAMING_SNAKE_CASE = sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE_ )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE_ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
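
# Worked example (added): 220 and 284 form the classic amicable pair, so the
# proper-divisor sum of each is the other:
#   sum_of_divisors(220) == 284  (1+2+4+5+10+11+20+22+44+55+110)
#   sum_of_divisors(284) == 220  (1+2+4+71+142)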
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity works within the range [0, 1]; 1 means an exact match.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):  # noqa: E741
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the consumed character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
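
# Worked example (added): for the classic pair "martha"/"marhta" there are
# m = 6 matches and t = 1 transposition ("th" vs "ht"), so the Jaro score is
# (6/6 + 6/6 + 5/6) / 3 ~= 0.9444; with a common prefix of length 3 the
# Jaro-Winkler score becomes 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.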
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Project Euler problem 99: find the 1-indexed line whose base/exponent pair
    has the greatest numerical value, comparing via exponent * log10(base).
    """
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result


if __name__ == "__main__":
    print(solution())
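
# Why comparing x * log10(a) works (added): log10 is monotonic, so it preserves
# the ordering of a**x without materialising the enormous numbers, e.g.
#   10 * log10(2) ~= 3.01  <  7 * log10(3) ~= 3.34,
# and indeed 2**10 = 1024 < 3**7 = 2187.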
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the maximizing player in a game tree whose leaf payoffs are ``scores``."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
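
# Usage sketch (added), mirroring the checkpoints exercised above:
#
#     pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
#     out = pipe("a photograph of an astronaut riding a horse")
#     rgb, depth = out.rgb, out.depth  # the pipeline returns an RGB image plus a depth map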
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # NOTE: the original obfuscated source re-created the scheduler
        # unconditionally, which would silently ignore a passed-in scheduler;
        # only the `if scheduler is None` construction makes `test_switch` work.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
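
# Usage sketch (added): UniPC is normally dropped into a diffusion pipeline by
# rebuilding it from the existing scheduler's config, exactly as `test_switch`
# exercises above:
#
#     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)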
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = CycleDiffusionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : Any ):
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__lowerCamelCase : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Union[str, Any] = CLIPTextModel(UpperCAmelCase )
__lowerCamelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCamelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=0 ):
__lowerCamelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
__lowerCamelCase : str = image / 2 + 0.5
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : List[Any] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : str = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : str = {
"prompt": "An astronaut riding an elephant",
"source_prompt": "An astronaut riding a horse",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"eta": 0.1,
"strength": 0.8,
"guidance_scale": 3,
"source_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components()
__lowerCamelCase : Optional[int] = CycleDiffusionPipeline(**UpperCAmelCase )
__lowerCamelCase : List[str] = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : int = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = pipe(**UpperCAmelCase )
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : Any = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : Tuple = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : List[Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCAmelCase , "half" ):
__lowerCamelCase : str = module.half()
__lowerCamelCase : str = CycleDiffusionPipeline(**UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : int = pipe(**UpperCAmelCase )
__lowerCamelCase : Any = output.images
__lowerCamelCase : Optional[int] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : List[str] = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCamelCase__ ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def lowerCamelCase__ ( self : Dict ):
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCamelCase__ ( self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase__ ( self : Union[str, Any] ):
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase__ ( self : str ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
__lowerCamelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
__lowerCamelCase : List[str] = init_image.resize((512, 512) )
__lowerCamelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-4"
__lowerCamelCase : Tuple = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="scheduler" )
__lowerCamelCase : Optional[int] = CycleDiffusionPipeline.from_pretrained(
UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase : Optional[int] = "A black colored car"
__lowerCamelCase : Dict = "A blue colored car"
__lowerCamelCase : str = torch.manual_seed(0 )
__lowerCamelCase : Tuple = pipe(
prompt=UpperCAmelCase , source_prompt=UpperCAmelCase , image=UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase , output_type="np" , )
__lowerCamelCase : Union[str, Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
__lowerCamelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
__lowerCamelCase : int = init_image.resize((512, 512) )
__lowerCamelCase : List[Any] = "CompVis/stable-diffusion-v1-4"
__lowerCamelCase : Union[str, Any] = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="scheduler" )
__lowerCamelCase : str = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase : List[Any] = "A black colored car"
__lowerCamelCase : List[Any] = "A blue colored car"
__lowerCamelCase : Dict = torch.manual_seed(0 )
__lowerCamelCase : Any = pipe(
prompt=UpperCAmelCase , source_prompt=UpperCAmelCase , image=UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase , output_type="np" , )
__lowerCamelCase : Optional[int] = output.images
assert np.abs(image - expected_image ).max() < 2E-2
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
def sum_of_digits(n: int) -> int:
    """Find the sum of the digits of ``n``, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of the digits of ``n``, recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of the digits of ``n``, via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = AutoConfig.from_pretrained(snake_case )
__magic_name__ :Dict = FlaxAutoModelForSeqaSeqLM.from_config(config=snake_case )
__magic_name__ :Any = checkpoints.load_tax_checkpoint(snake_case )
__magic_name__ :List[str] = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
__magic_name__ :Tuple = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ :Optional[int] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
__magic_name__ :Union[str, Any] = f'''layers_{str(snake_case )}'''
# Self-Attention
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
__magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
__magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
__magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
__magic_name__ :Optional[int] = flax_model.params['''encoder''']['''block'''][str(snake_case )]['''layer''']
__magic_name__ :List[Any] = tax_attention_key
__magic_name__ :List[str] = tax_attention_out
__magic_name__ :Optional[int] = tax_attention_query
__magic_name__ :str = tax_attention_value
__magic_name__ :Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = tax_global_layer_norm
if split_mlp_wi:
__magic_name__ :str = tax_mlp_wi_a
__magic_name__ :Dict = tax_mlp_wi_a
else:
__magic_name__ :Tuple = tax_mlp_wi
__magic_name__ :Optional[int] = tax_mlp_wo
__magic_name__ :Optional[int] = tax_mlp_layer_norm
__magic_name__ :Any = flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ :Dict = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
__magic_name__ :List[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ :Any = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
__magic_name__ :Dict = tax_encoder_global_rel_embedding
# Assigning
__magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
__magic_name__ :List[str] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__magic_name__ :List[Any] = f'''layers_{str(snake_case )}'''
# Self-Attention
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
__magic_name__ :str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
__magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
__magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
__magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
__magic_name__ :Tuple = tax_enc_dec_attention_module['''key''']['''kernel''']
__magic_name__ :Optional[int] = tax_enc_dec_attention_module['''out''']['''kernel''']
__magic_name__ :List[str] = tax_enc_dec_attention_module['''query''']['''kernel''']
__magic_name__ :Tuple = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
__magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
__magic_name__ :List[str] = flax_model.params['''decoder''']['''block'''][str(snake_case )]['''layer''']
__magic_name__ :Any = tax_attention_key
__magic_name__ :List[str] = tax_attention_out
__magic_name__ :Tuple = tax_attention_query
__magic_name__ :Tuple = tax_attention_value
__magic_name__ :Tuple = tax_pre_attention_layer_norm
__magic_name__ :Optional[Any] = tax_enc_dec_attention_key
__magic_name__ :str = tax_enc_dec_attention_out
__magic_name__ :Union[str, Any] = tax_enc_dec_attention_query
__magic_name__ :Any = tax_enc_dec_attention_value
__magic_name__ :Tuple = tax_cross_layer_norm
if split_mlp_wi:
__magic_name__ :Optional[int] = tax_mlp_wi_a
__magic_name__ :Union[str, Any] = tax_mlp_wi_a
else:
__magic_name__ :Optional[int] = tax_mlp_wi
__magic_name__ :List[str] = tax_mlp_wo
__magic_name__ :int = txa_mlp_layer_norm
__magic_name__ :str = flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
__magic_name__ :Tuple = txa_decoder_norm
# Only for layer 0:
__magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
__magic_name__ :str = tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ :List[Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
__magic_name__ :Any = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ :int = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(snake_case )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
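# --- Editor's usage sketch (hypothetical script name and paths; flags are the
# ones defined by the parser above). The config name must match the
# architecture of the T5X checkpoint, since weights are copied by key:
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/flax_dump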
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Any = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '''vivit'''
def __init__( self : Union[str, Any] , lowercase : int=2_24 , lowercase : Tuple=32 , lowercase : str=[2, 16, 16] , lowercase : str=3 , lowercase : Dict=7_68 , lowercase : Union[str, Any]=12 , lowercase : List[Any]=12 , lowercase : Dict=30_72 , lowercase : int="gelu_fast" , lowercase : Dict=0.0 , lowercase : Dict=0.0 , lowercase : List[str]=0.0_2 , lowercase : Tuple=1E-06 , lowercase : Any=True , **lowercase : Union[str, Any] , ):
'''simple docstring'''
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : Dict = attention_probs_dropout_prob
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : List[Any] = image_size
UpperCAmelCase : str = num_frames
UpperCAmelCase : str = tubelet_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Optional[int] = qkv_bias
super().__init__(**lowercase )
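# --- Editor's sketch: instantiating the configuration above. The public class
# name VivitConfig and the keyword names are assumptions based on the
# attribute names; this dump obfuscates the local signature. Defaults shown
# match the signature's defaults:
#   config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
#   config.model_type  # "vivit"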
| 595
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def UpperCamelCase__ ( A__ ) -> np.ndarray:
return input_array.reshape((input_array.size, 1) )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> np.ndarray:
snake_case__ : Tuple = np.nan
for i in range(A__ ):
snake_case__ : Optional[Any] = features[:, labels == i]
snake_case__ : Tuple = data.mean(1 )
        # Center the data of class i
snake_case__ : Optional[Any] = data - column_reshape(A__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(A__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case__ : Optional[int] = np.dot(A__ , centered_data.T )
return covariance_sum / features.shape[1]
def UpperCamelCase__ ( A__ , A__ , A__ ) -> np.ndarray:
snake_case__ : Tuple = features.mean(1 )
snake_case__ : Any = np.nan
for i in range(A__ ):
snake_case__ : Dict = features[:, labels == i]
snake_case__ : Optional[Any] = data.shape[1]
snake_case__ : List[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
snake_case__ : Any = device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
return covariance_sum / features.shape[1]
def UpperCamelCase__ ( A__ , A__ ) -> np.ndarray:
# Check if the features have been loaded
if features.any():
snake_case__ : List[str] = features.mean(1 )
# Center the dataset
snake_case__ : Optional[Any] = features - np.reshape(A__ , (data_mean.size, 1) )
snake_case__ : List[str] = np.dot(A__ , centered_data.T ) / features.shape[1]
snake_case__ , snake_case__ : List[str] = np.linalg.eigh(A__ )
        # Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
snake_case__ : Tuple = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
snake_case__ : Optional[int] = np.dot(filtered_eigenvectors.T , A__ )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=A__ )
logging.error('Dataset empty' )
raise AssertionError
def UpperCamelCase__ ( A__ , A__ , A__ , A__ ) -> np.ndarray:
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
snake_case__ , snake_case__ : List[str] = eigh(
covariance_between_classes(A__ , A__ , A__ ) , covariance_within_classes(A__ , A__ , A__ ) , )
snake_case__ : str = eigenvectors[:, ::-1][:, :dimensions]
snake_case__ , snake_case__ , snake_case__ : Dict = np.linalg.svd(A__ )
snake_case__ : List[str] = svd_matrix[:, 0:dimensions]
snake_case__ : Any = np.dot(filtered_svd_matrix.T , A__ )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=A__ )
logging.error('Dataset empty' )
raise AssertionError
def UpperCamelCase__ ( ) -> None:
# Create dummy dataset with 2 classes and 3 features
snake_case__ : int = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
snake_case__ : Any = np.array([0, 0, 0, 1, 1] )
snake_case__ : List[str] = 2
snake_case__ : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
snake_case__ : Optional[Any] = linear_discriminant_analysis(
A__ , A__ , A__ , A__ )
if isinstance(A__ , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def UpperCamelCase__ ( ) -> None:
snake_case__ : int = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
snake_case__ : List[Any] = 2
snake_case__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
snake_case__ : int = principal_component_analysis(A__ , A__ )
if not np.allclose(A__ , A__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
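# --- Editor's sketch: a minimal PCA call against the helper above, using the
# public name principal_component_analysis that the tests reference. Features
# are stored column-wise, shape (n_features, n_samples); keyword use of
# `dimensions` is an assumption:
#   features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
#   projected = principal_component_analysis(features, 2)
#   projected.shape  # (2, 3): two principal axes, three projected samples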
| 699
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase__ : List[Any] = datasets.utils.logging.get_logger(__name__)
class __snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCamelCase = None
__lowerCamelCase = None
class __snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCamelCase = datasets.Audio()
__lowerCamelCase = """audio"""
__lowerCamelCase = AudioFolderConfig
__lowerCamelCase = 42 # definition at the bottom of the script
__lowerCamelCase = AudioClassification(audio_column="""audio""" ,label_column="""label""" )
lowerCAmelCase__ : Tuple = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase__ : List[Any] = AUDIO_EXTENSIONS
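# --- Editor's sketch: loading a local folder with this builder. The
# "audiofolder" loader name and directory layout follow the datasets library
# convention; the path is a placeholder:
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="data")  # data/<label>/<clip>.wav
#   ds["train"][0]["audio"], ds["train"][0]["label"]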
| 699
| 1
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase_ = grid[0]
for row_n in range(1 , len(UpperCAmelCase__ ) ):
lowercase_ = grid[row_n]
lowercase_ = fill_row(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = grid[row_n]
return grid[-1][-1]
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(UpperCAmelCase__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
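# --- Editor's worked example: for grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]],
# the cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1, so the function above
# returns 7. Note the grid is modified in place as each row is filled.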
| 412
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a = False
a = False
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return TrainCommand(UpperCAmelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : ArgumentParser ):
'''simple docstring'''
lowercase_ = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=UpperCamelCase__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=UpperCamelCase__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=UpperCamelCase__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=UpperCamelCase__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=UpperCamelCase__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=UpperCamelCase__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=UpperCamelCase__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=UpperCamelCase__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=UpperCamelCase__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=UpperCamelCase__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=UpperCamelCase__ , default=3e-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=UpperCamelCase__ , default=1e-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self : Union[str, Any] , UpperCamelCase__ : Namespace ):
'''simple docstring'''
lowercase_ = logging.get_logger("""transformers-cli/training""" )
lowercase_ = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=UpperCamelCase__ )
lowercase_ = args.output
lowercase_ = args.column_label
lowercase_ = args.column_text
lowercase_ = args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
lowercase_ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
lowercase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowercase_ = None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
lowercase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowercase_ = args.validation_split
lowercase_ = args.train_batch_size
lowercase_ = args.valid_batch_size
lowercase_ = args.learning_rate
lowercase_ = args.adam_epsilon
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
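# --- Editor's usage sketch (hypothetical invocation; assumes this command is
# registered as `train` on the transformers CLI, as the parser above implies).
# The CSV must use tab-separated label/text columns per the --train_data help:
#   transformers-cli train --train_data train.csv --column_label 0 \
#       --column_text 1 --task text_classification --output ./trained_model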
| 412
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self , a__ ) -> Optional[int]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(a__ )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
A = """sgugger/tiny-distilbert-classification"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , only_pretrain_model=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[Any]:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , torchscript=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , fpaa=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(a__ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can\'t do half precision""" )
def _UpperCAmelCase ( self ) -> List[str]:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=a__ , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = """sshleifer/tinier_bart"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Any:
A = """sshleifer/tinier_bart"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , save_to_csv=a__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(a__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(a__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(a__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(a__ , """env.csv""" ) , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
benchmark.run()
self.assertTrue(Path(os.path.join(a__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """env.csv""" ) ).exists() )
def _UpperCAmelCase ( self ) -> Tuple:
A = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(a__ ):
self.assertTrue(hasattr(a__ , """sequential""" ) )
self.assertTrue(hasattr(a__ , """cumulative""" ) )
self.assertTrue(hasattr(a__ , """current""" ) )
self.assertTrue(hasattr(a__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a__ , """log.txt""" ) , log_print=a__ , trace_memory_line_by_line=a__ , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(a__ , """log.txt""" ) ).exists() )
| 706
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Any=5 ) -> Optional[Any]:
"""simple docstring"""
assert masked_input.count("""<mask>""" ) == 1
A = torch.tensor(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) ).unsqueeze(0 ) # Batch size 1
A = model(UpperCamelCase__ )[0] # The last hidden-state is the first element of the output tuple
A = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
A = logits[0, masked_index, :]
A = logits.softmax(dim=0 )
A , A = prob.topk(k=UpperCamelCase__ , dim=0 )
A = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(UpperCamelCase__ ) )] )
A = tokenizer.mask_token
A = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
A = predicted_token_bpe.replace("""\u2581""" , """ """ )
if " {0}".format(UpperCamelCase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(UpperCamelCase__ ) , UpperCamelCase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(UpperCamelCase__ , UpperCamelCase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_lowercase : Optional[int] = CamembertTokenizer.from_pretrained("camembert-base")
_lowercase : int = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
_lowercase : Optional[int] = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
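# --- Editor's note: each returned tuple is (filled_sentence, probability,
# predicted_token), so one plausible top hit for the prompt above would look
# like ("Le camembert est delicieux :)", 0.49, "delicieux") -- values are
# illustrative, not reproduced output.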
| 546
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Any=99 , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : List[Any]=37 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[Any]=512 , lowerCAmelCase_ : Any=16 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Any=0.0_2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Tuple=None , ) -> Any:
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Optional[Any] = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : int = num_choices
UpperCAmelCase_ : Dict = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , use_stable_embedding=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ) -> str:
UpperCAmelCase_ : str = OpenLlamaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , ) -> Tuple:
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = OpenLlamaModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
UpperCAmelCase_ : Any = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , ) -> Any:
UpperCAmelCase_ : str = OpenLlamaForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : str = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Any:
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Union[str, Any] = OpenLlamaForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , )
UpperCAmelCase_ : int = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCAmelCase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ : int = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["hidden_states"][0]
UpperCAmelCase_ : Dict = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
UpperCAmelCase_ : int = self.prepare_config_and_inputs()
        (
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
        ) : Optional[int] = config_and_inputs
UpperCAmelCase_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (__A , __A , __A , unittest.TestCase ):
__magic_name__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__magic_name__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__magic_name__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = OpenLlamaModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : Tuple = input_dict["input_ids"]
UpperCAmelCase_ : Optional[int] = input_ids.ne(1 ).to(lowerCAmelCase_ )
UpperCAmelCase_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = OpenLlamaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : Optional[Any] = "single_label_classification"
UpperCAmelCase_ : Any = input_dict["input_ids"]
UpperCAmelCase_ : Any = input_ids.ne(1 ).to(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = OpenLlamaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = 3
UpperCAmelCase_ : str = "multi_label_classification"
UpperCAmelCase_ : List[Any] = input_dict["input_ids"]
UpperCAmelCase_ : Union[str, Any] = input_ids.ne(1 ).to(lowerCAmelCase_ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Dict = OpenLlamaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : int = OpenLlamaModel(lowerCAmelCase_ )
original_model.to(lowerCAmelCase_ )
original_model.eval()
UpperCAmelCase_ : List[Any] = original_model(lowerCAmelCase_ ).last_hidden_state
UpperCAmelCase_ : Any = original_model(lowerCAmelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : Any = {"type": scaling_type, "factor": 1_0.0}
UpperCAmelCase_ : str = OpenLlamaModel(lowerCAmelCase_ )
scaled_model.to(lowerCAmelCase_ )
scaled_model.eval()
UpperCAmelCase_ : Optional[int] = scaled_model(lowerCAmelCase_ ).last_hidden_state
UpperCAmelCase_ : Tuple = scaled_model(lowerCAmelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-5 ) )
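# --- Editor's sketch: the scaling dict built in the parameterized test above
# corresponds to the config's RoPE-scaling field (field name assumed from the
# Llama convention):
#   config.rope_scaling = {"type": "linear", "factor": 10.0}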
| 95
|
import numpy as np
import qiskit
def A_ ( _lowerCAmelCase = 8 , _lowerCAmelCase = None ) -> str:
UpperCamelCase : Tuple = np.random.default_rng(seed=_lowerCAmelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
UpperCamelCase : List[str] = 6 * key_len
# Measurement basis for Alice's qubits.
UpperCamelCase : List[Any] = rng.integers(2 , size=_lowerCAmelCase )
# The set of states Alice will prepare.
UpperCamelCase : List[Any] = rng.integers(2 , size=_lowerCAmelCase )
# Measurement basis for Bob's qubits.
UpperCamelCase : Optional[int] = rng.integers(2 , size=_lowerCAmelCase )
# Quantum Circuit to simulate BB84
UpperCamelCase : List[Any] = qiskit.QuantumCircuit(_lowerCAmelCase , name="BB84" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCAmelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCAmelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCAmelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCAmelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCAmelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
UpperCamelCase : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
UpperCamelCase : Tuple = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1 , seed_simulator=_lowerCAmelCase )
# Returns the result of measurement.
UpperCamelCase : Optional[Any] = job.result().get_counts(_lowerCAmelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
UpperCamelCase : Tuple = "".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
UpperCamelCase : Tuple = gen_key[:key_len] if len(_lowerCAmelCase ) >= key_len else gen_key.ljust(_lowerCAmelCase , "0" )
return key
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
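# --- Editor's sketch: deterministic key generation with the helper above
# (`bbaa` is this dump's rendering of bb84; the `seed` keyword is taken from
# the __main__ call). Fixing the seed fixes both the random bases and the
# simulator, and the returned key is padded/truncated to the requested length:
#   key = bbaa(16, seed=0)
#   len(key)  # 16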
| 629
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class A_ ( a_ ):
_SCREAMING_SNAKE_CASE = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=64 , __SCREAMING_SNAKE_CASE : str=4_80_00 , __SCREAMING_SNAKE_CASE : int=4_80 , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Optional[int]=10_24 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : float = 0 , __SCREAMING_SNAKE_CASE : float = 1_40_00 , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : str = "fusion" , __SCREAMING_SNAKE_CASE : str = "repeatpad" , **__SCREAMING_SNAKE_CASE : int , ):
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = top_db
__a = truncation
__a = padding
__a = fft_window_size
__a = (fft_window_size >> 1) + 1
__a = hop_length
__a = max_length_s
__a = max_length_s * sampling_rate
__a = sampling_rate
__a = frequency_min
__a = frequency_max
__a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=__SCREAMING_SNAKE_CASE , max_frequency=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , norm=__SCREAMING_SNAKE_CASE , mel_scale="htk" , )
__a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=__SCREAMING_SNAKE_CASE , max_frequency=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , norm="slaney" , mel_scale="slaney" , )
def _UpperCAmelCase ( self : List[str] ):
__a = copy.deepcopy(self.__dict__ )
__a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : np.array , __SCREAMING_SNAKE_CASE : Optional[np.array] = None ):
__a = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__SCREAMING_SNAKE_CASE , log_mel="dB" , )
return log_mel_spectrogram.T
def _UpperCAmelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ):
__a = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__a = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__a = [0]
# randomly choose index for each part
__a = np.random.choice(ranges[0] )
__a = np.random.choice(ranges[1] )
__a = np.random.choice(ranges[2] )
__a = mel[idx_front : idx_front + chunk_frames, :]
__a = mel[idx_middle : idx_middle + chunk_frames, :]
__a = mel[idx_back : idx_back + chunk_frames, :]
__a = torch.tensor(mel[None, None, :] )
__a = torch.nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=[chunk_frames, 64] , mode="bilinear" , align_corners=__SCREAMING_SNAKE_CASE )
__a = mel_shrink[0][0].numpy()
__a = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.array , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__a = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__a = len(__SCREAMING_SNAKE_CASE ) - max_length
__a = np.random.randint(0 , overflow + 1 )
__a = waveform[idx : idx + max_length]
__a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters )
                __a = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
__a = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__a = np.stack([mel, mel, mel, mel] , axis=0 )
__a = False
else:
__a = self._random_mel_fusion(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
__a = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__a = int(max_length / len(__SCREAMING_SNAKE_CASE ) )
__a = np.stack(np.tile(__SCREAMING_SNAKE_CASE , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__a = int(max_length / len(__SCREAMING_SNAKE_CASE ) )
__a = np.stack(np.tile(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__a = np.pad(__SCREAMING_SNAKE_CASE , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
__a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters )
__a = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **__SCREAMING_SNAKE_CASE : int , ):
__a = truncation if truncation is not None else self.truncation
__a = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__a = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__a = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__a = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [np.asarray(__SCREAMING_SNAKE_CASE )]
# convert to mel spectrogram, truncate and pad if needed.
__a = [
self._get_input_mel(__SCREAMING_SNAKE_CASE , max_length if max_length else self.nb_max_samples , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for waveform in raw_speech
]
__a = []
__a = []
for mel, longer in padded_inputs:
input_mel.append(__SCREAMING_SNAKE_CASE )
is_longer.append(__SCREAMING_SNAKE_CASE )
if truncation == "fusion" and sum(__SCREAMING_SNAKE_CASE ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__a = np.random.randint(0 , len(__SCREAMING_SNAKE_CASE ) )
__a = True
if isinstance(input_mel[0] , __SCREAMING_SNAKE_CASE ):
__a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__a = [[longer] for longer in is_longer]
__a = {"input_features": input_mel, "is_longer": is_longer}
__a = BatchFeature(__SCREAMING_SNAKE_CASE )
if return_tensors is not None:
__a = input_features.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return input_features
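# --- Editor's usage sketch: feeding raw mono audio through the extractor
# above. The public class name ClapFeatureExtractor is assumed from the
# Hugging Face naming convention (this dump obfuscates it); the 48 kHz default
# and the output keys come from the code above:
#   import numpy as np
#   fe = ClapFeatureExtractor()
#   audio = np.zeros(48_000, dtype=np.float32)  # 1 s of silence at 48 kHz
#   out = fe(audio, sampling_rate=48_000, return_tensors="np")
#   out["input_features"].shape, out["is_longer"]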
| 721
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __A ( _A , _A=False , _A=False ):
"""simple docstring"""
__a = "backbone." if is_semantic else ""
__a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __A ( _A , _A , _A=False , _A=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
__a = "backbone." if is_semantic else ""
# queries, keys and values
__a = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
__a = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
__a = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
__a = in_proj_weight[
: config.hidden_size, :
]
__a = q_bias
__a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a = in_proj_weight[
-config.hidden_size :, :
]
__a = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__a = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
__a = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
__a = gamma_a
__a = gamma_a
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = dct.pop(_A )
__a = val
def __A ( ):
"""simple docstring"""
__a = "http://images.cocodataset.org/val2017/000000039769.jpg"
__a = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def __A ( _A , _A , _A=False ):
"""simple docstring"""
__a = False if "rvlcdip" in checkpoint_url else True
__a = BeitConfig(use_absolute_position_embeddings=_A , use_mask_token=_A )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__a = 1024
__a = 4096
__a = 24
__a = 16
# labels
if "rvlcdip" in checkpoint_url:
__a = 16
__a = "huggingface/label-files"
__a = "rvlcdip-id2label.json"
__a = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
__a = {int(_A ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__a = torch.hub.load_state_dict_from_url(_A , map_location="cpu" )["model"]
__a = create_rename_keys(_A , has_lm_head=_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , _A , has_lm_head=_A )
# load HuggingFace model
__a = BeitForMaskedImageModeling(_A ) if has_lm_head else BeitForImageClassification(_A )
model.eval()
model.load_state_dict(_A )
# Check outputs on an image
__a = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_A )
__a = prepare_img()
__a = image_processor(images=_A , return_tensors="pt" )
__a = encoding["pixel_values"]
__a = model(_A )
__a = outputs.logits
# verify logits
__a = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_A ), "Shape of logits not as expected"
Path(_A ).mkdir(exist_ok=_A )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_A )
if push_to_hub:
if has_lm_head:
__a = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
__a = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(_A , _A ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_A , )
model.push_to_hub(
repo_path_or_name=Path(_A , _A ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_A , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
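# --- Editor's usage sketch (hypothetical script name; the checkpoint URL is
# the parser default above, and the flags are the ones it defines):
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base --push_to_hub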
| 525
| 0
|
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __a (UpperCamelCase_):
'''simple docstring'''
# to overwrite at feature extractactor specific tests
_SCREAMING_SNAKE_CASE :List[Any] = None
_SCREAMING_SNAKE_CASE :Tuple = None
@property
def _a ( self ) -> Tuple:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
self.assertTrue(hasattr(_a , """padding_value""" ) )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
SCREAMING_SNAKE_CASE__ : int = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
SCREAMING_SNAKE_CASE__ : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
SCREAMING_SNAKE_CASE__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
SCREAMING_SNAKE_CASE__ : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
SCREAMING_SNAKE_CASE__ : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _a ( self , _a=False ) -> Any:
"""simple docstring"""
def _inputs_have_equal_length(_a ):
SCREAMING_SNAKE_CASE__ : int = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a , _a ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a , _a ):
if not np.allclose(np.asarray(_a ) , np.asarray(_a ) , atol=1E-3 ):
return False
return True
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : List[str] = self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE__ : int = self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE__ : int = self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE__ : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(_a , padding=_a )
SCREAMING_SNAKE_CASE__ : Dict = input_a[input_name]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.pad(_a , padding="""longest""" )
SCREAMING_SNAKE_CASE__ : str = input_a[input_name]
SCREAMING_SNAKE_CASE__ : str = feat_extract.pad(_a , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(_a , padding="""longest""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : List[str] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a , padding="""max_length""" )[input_name]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.pad(
_a , padding="""max_length""" , max_length=_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a , _a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract.pad(_a , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE__ : List[str] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(_a , padding="""longest""" , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE__ : Optional[int] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract.pad(
_a , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : Dict = feat_extract.pad(
_a , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_a , return_tensors="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = input_a[input_name]
self.assertTrue(all(len(_a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_a , _a ) )
SCREAMING_SNAKE_CASE__ : str = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def _a ( self , _a=False ) -> Optional[int]:
"""simple docstring"""
def _inputs_have_equal_length(_a ):
SCREAMING_SNAKE_CASE__ : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a , _a ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a , _a ):
if not np.allclose(np.asarray(_a ) , np.asarray(_a ) , atol=1E-3 ):
return False
return True
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.pad(_a , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE__ : str = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE__ : Tuple = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_a , )
SCREAMING_SNAKE_CASE__ : int = input_a[input_name]
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_a , return_tensors="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : Optional[Any] = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Tuple = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a , _a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a , truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a , padding="""longest""" , truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a , padding="""longest""" , truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a , padding="""max_length""" , truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE__ : List[str] = 12
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_a , truncation=_a , )
SCREAMING_SNAKE_CASE__ : List[str] = input_a[input_name]
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
_a , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_a , )
SCREAMING_SNAKE_CASE__ : Optional[int] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE__ : Dict = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE__ : Tuple = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
def _a ( self ) -> int:
"""simple docstring"""
self._check_padding(numpify=_a )
def _a ( self ) -> int:
"""simple docstring"""
self._check_padding(numpify=_a )
def _a ( self ) -> Any:
"""simple docstring"""
self._check_truncation(numpify=_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
self._check_truncation(numpify=_a )
@require_torch
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.pad(_a , padding="""longest""" , return_tensors="""np""" )[input_name]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.pad(_a , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[str] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = feat_extract.pad(_a , padding="""longest""" , return_tensors="""np""" )[input_name]
SCREAMING_SNAKE_CASE__ : str = feat_extract.pad(_a , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.feature_extraction_class(**_a )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = [len(_a ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Dict = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Dict = feat_extract.pad(_a , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _a )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = [len(_a ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : str = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(_a )
SCREAMING_SNAKE_CASE__ : Dict = feat_extract.pad(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
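# Minimal sketch of the pad/truncate API exercised by this mixin, assuming a
# concrete subclass such as Wav2Vec2FeatureExtractor (not defined in this file):
#   from transformers import Wav2Vec2FeatureExtractor
#   feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = BatchFeature({"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]})
#   padded = feat_extract.pad(batch, padding="longest", return_tensors="np")
#   padded["input_values"].shape  # -> (2, 3); the short sequence is padded with padding_value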
| 680
|
"""simple docstring"""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
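# Example: solution(100) returns 648, the digit sum of 100! (Project Euler #20).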
| 680
| 1
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = BertJapaneseTokenizer
snake_case = False
snake_case = True
def _snake_case ( self )->Tuple:
'''simple docstring'''
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def _snake_case ( self )->Tuple:
'''simple docstring'''
pass # TODO add if relevant
def _snake_case ( self )->List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def _snake_case ( self )->str:
'''simple docstring'''
pass # TODO add if relevant
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Tuple = self.tokenizer_class(self.vocab_file )
A_ : Optional[int] = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
A_ : str = '''こんにちは、世界。\nこんばんは、世界。'''
A_ : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
A_ : Dict = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as handle:
pickle.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as handle:
A_ : Any = pickle.load(_SCREAMING_SNAKE_CASE )
A_ : Any = tokenizer_new.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : List[str] = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def _snake_case ( self )->Dict:
'''simple docstring'''
try:
A_ : Optional[int] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def _snake_case ( self )->str:
'''simple docstring'''
try:
A_ : Union[str, Any] = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Optional[Any] = MecabTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def _snake_case ( self )->List[str]:
'''simple docstring'''
try:
A_ : Tuple = MecabTokenizer(
do_lower_case=_SCREAMING_SNAKE_CASE , normalize_text=_SCREAMING_SNAKE_CASE , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Union[str, Any] = MecabTokenizer(normalize_text=_SCREAMING_SNAKE_CASE , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
A_ : Tuple = '''こんにちは、世界。\nこんばんは、世界。'''
A_ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
A_ : List[Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as handle:
pickle.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as handle:
A_ : Tuple = pickle.load(_SCREAMING_SNAKE_CASE )
A_ : Dict = tokenizer_new.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_sudachi
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def _snake_case ( self )->str:
'''simple docstring'''
A_ : Any = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Tuple = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def _snake_case ( self )->str:
'''simple docstring'''
A_ : Any = SudachiTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def _snake_case ( self )->str:
'''simple docstring'''
A_ : List[str] = SudachiTokenizer(normalize_text=_SCREAMING_SNAKE_CASE , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Union[str, Any] = SudachiTokenizer(trim_whitespace=_SCREAMING_SNAKE_CASE , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = '''こんにちは、世界。\nこんばんは、世界。'''
A_ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
A_ : List[Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as handle:
pickle.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as handle:
A_ : str = pickle.load(_SCREAMING_SNAKE_CASE )
A_ : Dict = tokenizer_new.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_jumanpp
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def _snake_case ( self )->int:
'''simple docstring'''
A_ : str = JumanppTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[str] = JumanppTokenizer(normalize_text=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : List[Any] = JumanppTokenizer(trim_whitespace=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def _snake_case ( self )->str:
'''simple docstring'''
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='''[UNK]''')
        self.assertListEqual(tokenizer.tokenize(''''''), [])
        self.assertListEqual(tokenizer.tokenize('''こんにちは'''), ['''こんにちは'''])
        self.assertListEqual(tokenizer.tokenize('''こんばんは'''), ['''こん''', '''##ばんは'''])
        self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは'''), ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''])
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Union[str, Any] = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
A_ : Dict = tokenizer.subword_tokenizer
A_ : List[Any] = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
A_ : Optional[int] = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Optional[Any] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
A_ : Optional[Any] = tokenizer.encode('''ありがとう。''' , add_special_tokens=_SCREAMING_SNAKE_CASE )
A_ : Dict = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
A_ : str = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = BertJapaneseTokenizer
snake_case = False
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_SCREAMING_SNAKE_CASE )
    def get_input_output_texts(self, tokenizer):
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text
def _snake_case ( self )->int:
'''simple docstring'''
pass # TODO add if relevant
def _snake_case ( self )->Tuple:
'''simple docstring'''
pass # TODO add if relevant
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
A_ : Any = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _snake_case ( self )->str:
'''simple docstring'''
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token='''[UNK]''')
        self.assertListEqual(tokenizer.tokenize(''''''), [])
        self.assertListEqual(tokenizer.tokenize('''こんにちは'''), ['''こ''', '''ん''', '''に''', '''ち''', '''は'''])
        self.assertListEqual(tokenizer.tokenize('''こんにちほ'''), ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''])
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : List[str] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
A_ : List[str] = tokenizer.encode('''ありがとう。''' , add_special_tokens=_SCREAMING_SNAKE_CASE )
A_ : str = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_SCREAMING_SNAKE_CASE )
A_ : str = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : List[str] = '''cl-tohoku/bert-base-japanese'''
A_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Union[str, Any] = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
A_ : Optional[int] = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
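# Usage sketch (checkpoint name taken from the tests above; requires network
# access and a MeCab backend installed):
#   tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#   tokenizer.tokenize("こんにちは、世界。")  # MeCab word split, then WordPiece subwords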
| 152
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2
@register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op here; kept for interchangeability with schedulers that scale the model input."""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        # second-order correction of the Euler step
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
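# Minimal sampling sketch (Algorithm 2 of Karras et al. 2022) wiring together the
# methods above; `model` is an assumed sigma-conditioned denoiser and the tensor
# shape is illustrative, not prescribed by the scheduler:
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = scheduler.init_noise_sigma * torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = model(sample_hat, sigma_hat)  # assumed denoiser call
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample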
| 152
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None,
        channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
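# Usage sketch: the defaults above reproduce the base configuration; the last two
# fields are the LiLT-specific layout parameters:
#   config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#   config.model_type  # -> "lilt"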
| 549
|
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho) for bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
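# Worked example (approximate textbook values, used only for illustration):
# water has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~= 1467.7 m/s,
# close to the commonly cited ~1480 m/s for water at room temperature.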
| 549
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : int = (DDIMParallelScheduler,)
UpperCamelCase__ : Optional[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowerCamelCase ( self : List[str] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : Dict , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE)
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a , __a = 10, 0.0
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
for t in scheduler.timesteps:
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).prev_sample
return sample
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__SCREAMING_SNAKE_CASE)
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(steps_offset=1)
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1E-5
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a , __a = 10, 0.0
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , __SCREAMING_SNAKE_CASE)
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 1_147.7_904) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.full_loop()
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 172.0_067) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.full_loop(prediction_type='''v_prediction''')
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01)
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 149.8_295) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01)
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 149.0_784) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
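# Usage sketch reconstructed from full_loop above (config keys and the
# positional `step(model_output, timestep, sample, eta)` call mirror the tests;
# `model` is an assumed denoiser):
#   scheduler = DDIMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear", clip_sample=True)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample, 0.0).prev_sample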
| 700
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
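# Round-trip sanity check: decipher() inverts encipher() for any keyword, since
# create_cipher_map() builds a bijection on A-Z and non-letters pass through
# unchanged (note both directions uppercase their input):
#   cipher_map = create_cipher_map("Diffie")
#   encoded = encipher("Hello World!!", cipher_map)
#   decipher(encoded, cipher_map)  # -> "HELLO WORLD!!"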
| 60
| 0
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit_output(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
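# Typical (non-test) usage sketch of the decorator under test: it calls the
# wrapped function with `starting_batch_size`, catches out-of-memory errors,
# halves the batch size, and retries until the function succeeds:
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build dataloaders with `batch_size` and run the actual training
#   training_loop()  # no argument: batch_size is injected by the decorator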
| 623
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(config)
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet_upscale
lowerCamelCase = DDPMScheduler()
lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , )
lowerCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A painting of a squirrel eating a burger"
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet_upscale
lowerCamelCase = DDPMScheduler()
lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , )
lowerCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A painting of a squirrel eating a burger"
lowerCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase = output.images
assert image.shape[0] == 2
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.dummy_cond_unet_upscale
lowerCamelCase = DDPMScheduler()
lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase = unet.half()
lowerCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , )
lowerCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A painting of a squirrel eating a burger"
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images
lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
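        # Note: sequential CPU offload keeps only the currently executing submodule
        # on the GPU, which is why peak allocation stays far below the size of the
        # full fp16 pipeline weights held resident all at once.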
| 623
| 1
|
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # read and hash the file in 1 MiB chunks to bound memory use
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
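# Illustration (values assumed, not defaults): if config.IN_MEMORY_MAX_SIZE were
# 250_000_000 (250 MB), then is_small_dataset(100_000_000) -> True, and such a
# dataset could be copied into memory instead of being memory-mapped from disk.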
| 712
|
from math import factorial


def solution(n: int = 20) -> int:
    """Number of monotonic lattice routes through an n x n grid, i.e. the
    central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
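# Worked example: solution(2) = C(4, 2) = 4! / (2! * 2!) = 6, the six monotonic
# lattice paths through a 2 x 2 grid.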
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 597
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
def _a ( self : str , _snake_case : int , _snake_case : List[str]=0 ):
"""simple docstring"""
A__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
A__ = np.ones((64, 64) , dtype=np.floataa )
A__ = 0
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def _a ( self : List[str] ):
"""simple docstring"""
A__ = 'cpu'
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**_snake_case )
A__ = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
A__ = pipe(**self.get_dummy_inputs(_snake_case ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(_snake_case ) , return_dict=_snake_case , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _a ( self : Dict ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # masked region reconstructed from the upstream test: the band where the hat goes
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 9
|
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 287
| 0
|
"""Distribute coins in a binary tree so that every node ends with exactly one coin (LeetCode 979)."""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]
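# Small illustrative case: a root holding 3 coins with two empty children needs one
# move per child, so distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2.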
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
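# For a fused qkv projection of shape (3 * dim, hidden), the slices above recover
# query = val[:dim], key = val[dim : 2 * dim] and value = val[-dim:] (the last dim rows).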
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights into our MobileViT structure."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 653
| 1
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Optional[int] , *a :Optional[int] , **a :Optional[int] ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :str , *a :Optional[Any] , **a :List[str] ) -> str:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Optional[int] , *a :int , **a :Union[str, Any] ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Union[str, Any] , *a :Dict , **a :int ) -> str:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Tuple , *a :Tuple , **a :List[Any] ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[Any] , *a :Dict , **a :Any ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Union[str, Any] , *a :List[Any] , **a :Optional[Any] ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Optional[int] , *a :Optional[int] , **a :List[str] ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[str] , *a :Any , **a :int ) -> str:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Tuple , *a :Union[str, Any] , **a :Optional[int] ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Optional[int] , *a :Optional[int] , **a :List[str] ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :str , *a :Optional[int] , **a :List[Any] ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[str] , *a :Tuple , **a :Union[str, Any] ) -> int:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Tuple , *a :Any , **a :Tuple ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Any , *a :Any , **a :Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[Any] , *a :Optional[int] , **a :Optional[Any] ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Optional[int] , *a :Optional[int] , **a :Any ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Tuple , *a :str , **a :Union[str, Any] ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Optional[Any] , *a :Optional[Any] , **a :int ) -> str:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[Any] , *a :str , **a :Any ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :int , *a :int , **a :Tuple ) -> str:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Any , *a :int , **a :Any ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Any , *a :Optional[Any] , **a :Optional[Any] ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Any , *a :List[str] , **a :Dict ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :str , *a :str , **a :List[Any] ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[str] , *a :Union[str, Any] , **a :Optional[Any] ) -> int:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Tuple , *a :Dict , **a :Optional[Any] ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Tuple , *a :Optional[int] , **a :Any ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Dict , *a :List[Any] , **a :Union[str, Any] ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :List[str] , *a :int , **a :Union[str, Any] ) -> str:
requires_backends(self , ["sentencepiece"] )
class lowerCamelCase__ ( metaclass=__lowercase):
'''simple docstring'''
_A = ['sentencepiece']
def __init__( self :Dict , *a :str , **a :str ) -> Any:
requires_backends(self , ["sentencepiece"] )
| 557
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 557
| 1
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
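# Rough usage sketch (variable names are illustrative, not part of this module):
#   encoder = PaintByExampleImageEncoder(clip_vision_config, proj_size=768)
#   cond, uncond = encoder(pixel_values, return_uncond_vector=True)
# `cond` has shape (batch, 1, proj_size); `uncond` is the learned unconditional
# embedding used when scaling with classifier-free guidance.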
| 388
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
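# A typical launch (script name assumed) synchronizes parameters every 8 steps while
# also accumulating gradients over 2 minibatches:
#   accelerate launch local_sgd_example.py --local_sgd_steps 8 --gradient_accumulation_steps 2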
| 388
| 1
|
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether a queen at (row, column) is attacked by any earlier queen."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking whenever a placement is unsafe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
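# Note: the backtracking search above tries at most n columns per row, giving an
# O(n!)-style search tree; for n = 8 it prints all 92 solutions before reporting the count.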
| 320
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
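    # The _LazyModule indirection defers the heavy torch/TensorFlow imports until an
    # attribute is first accessed, so importing the package stays cheap even when
    # both backends are installed.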
| 15
| 0
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    """Print a few sample gcd computations."""
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
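# Trace for euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), so the gcd is 6.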
| 718
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    """Print a few sample gcd computations."""
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 96
| 0
|
"""Highest Response Ratio Next (HRRN) scheduling: a non-preemptive discipline that
serves the ready process with the largest (waiting time + burst time) / burst time ratio."""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
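# Response-ratio example: a process with arrival time 1 and burst time 2 evaluated at
# current_time 4 scores (2 + (4 - 1)) / 2 = 2.5; larger ratios are served first.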
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 72
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 611
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
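# Usage sketch: the defaults above describe Swin-Tiny, so the derived attributes
# can be checked directly (a minimal example, not part of the original module):
#
#     config = SwinConfig()
#     assert config.num_layers == 4             # one entry per stage in `depths`
#     assert config.hidden_size == 96 * 2 ** 3  # channel dim after the last stage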
| 700
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
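# Usage sketch mirroring the APIs tested above (assumes network access to the
# "ylacombe/bark-small" checkpoint used by this test):
#
#     processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#     inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#     # inputs["input_ids"] feeds the text model; inputs["history_prompt"] holds the voice preset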
| 277
| 0
|
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ : Any = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ : int = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
A_ : Union[str, Any] = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : int = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
A_ : Optional[int] = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
A_ : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
A_ : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
A_ : List[Any] = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
A_ : str = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
A_ : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Dict = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
A_ : Any = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
A_ : str = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
A_ : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : List[str] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
A_ : List[str] = ""
A_ : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Tuple = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
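# Usage sketch for the validator exercised above, built only from names defined
# in this module:
#
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()  # raises ValueError listing any issues found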
| 38
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
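# Usage sketch: the defaults above mirror the ViT-MSN base architecture
# (a minimal example, not part of the original module):
#
#     config = ViTMSNConfig()
#     assert (config.image_size // config.patch_size) ** 2 == 196  # patches per image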
| 390
| 0
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
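# Usage sketch via the high-level pipeline factory (the model name is an
# assumption; any depth-estimation checkpoint such as "Intel/dpt-large" works):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor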
| 598
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
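# The equality asserted above relies on `add_noise` depending only on the shared
# beta schedule; a standalone sketch of the same idea (assuming the two
# schedulers' default configs match, as they do for the values used here):
#
#     sample, noise, t = torch.randn(1, 3, 32, 32), torch.randn(1, 3, 32, 32), torch.tensor([10])
#     assert torch.allclose(
#         DDPMScheduler().add_noise(sample, noise, t), DDIMScheduler().add_noise(sample, noise, t)
#     )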
| 598
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
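# Running the suite above outside a test runner (standard unittest API, not
# part of the original file):
#
#     suite = unittest.TestLoader().loadTestsFromTestCase(MegatronBertModelTest)
#     unittest.TextTestRunner().run(suite)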
| 250
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
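# Usage sketch for the integration check above (assumes network access to the
# "junnyu/roformer_chinese_base" checkpoint):
#
#     model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#     logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 50000)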
| 250
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
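# Invocation sketch (the script file name is an assumption; the flags are the
# ones defined by the argparse block above):
#
#     python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 --pytorch_dump_folder_path ./sam-vit-huge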
| 409
|
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
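# Cross-check sketch using the standard library (not part of the original
# solution; the known answer is 171):
#
#     import datetime
#     assert solution() == sum(
#         datetime.date(y, m, 1).weekday() == 6  # Sunday
#         for y in range(1901, 2001)
#         for m in range(1, 13)
#     )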
| 409
| 1
|