| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0 or 1) |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None) -> None:
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
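A minimal usage sketch for the two classes above (illustrative only; AutoTokenizer and the checkpoint name are assumptions, torch must be installed, and exact behavior depends on the transformers version):

from transformers import AutoTokenizer

config = LongformerConfig(attention_window=256)
onnx_config = LongformerOnnxConfig(config)
tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
print(sorted(dummy))  # ['attention_mask', 'global_attention_mask', 'input_ids']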
| 142
|
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 142
| 1
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file: str) -> List[str]:
    """Reads a vocabulary file into a list of stripped token strings."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # ESM sequences are whitespace-separated residues.
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
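A quick round-trip sketch for the tokenizer above, writing a toy vocabulary to a temporary file (the token list is illustrative, not the real ESM vocabulary, and exact behavior depends on the installed transformers version):

import tempfile

toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "<mask>"]
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "vocab.txt")
    with open(path, "w") as f:
        f.write("\n".join(toy_vocab))
    tok = EsmTokenizer(path)
    # _tokenize splits on whitespace, then <cls>/<eos> are wrapped around the sequence
    print(tok("L A G")["input_ids"])  # [0, 4, 5, 6, 2] under this toy vocab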
| 370
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        # Sync the backend pre-tokenizer's add_prefix_space with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 297
| 0
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns n together with all of its left and right truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick pre-check: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the first eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
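As a quick check, the helpers above satisfy standard facts about Project Euler 37: 23 is the smallest truncatable prime, and 3797 stays prime under every left and right truncation:

assert list_truncated_nums(3797) == [3797, 797, 379, 97, 37, 7, 3]
assert all(is_prime(i) for i in list_truncated_nums(3797))
assert compute_truncated_primes(1) == [23]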
| 39
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += updated_input_layer_and_first_hidden_layer_weights
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += updated_second_hidden_layer_and_output_layer_weights

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values (each row is one training sample).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
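One property worth checking: sigmoid_derivative takes the activation s(x), not x itself, and s'(x) = s(x)(1 - s(x)). At x = 0 the activation is exactly 0.5, so the derivative is exactly 0.25:

s = sigmoid(numpy.array([0.0]))
assert float(sigmoid_derivative(s)) == 0.25  # 0.5 * (1 - 0.5)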
| 187
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 200
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 200
| 1
|
"""simple docstring"""
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
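A hypothetical direct call, bypassing argparse (the runner names and token are placeholders, not real values; the token needs actions:read on the repository):

get_runner_status(["single-gpu-runner", "multi-gpu-runner"], token="<github-token>")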
| 81
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
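A minimal preprocessing sketch for the processor above (the class name is as restored here; random pixels stand in for a real image):

processor = ImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): shortest edge to 256, then a 224x224 center crop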
| 307
| 0
|
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
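A hypothetical end-to-end use of the pipeline above (the class name is as reconstructed here, the checkpoint is illustrative, and a CUDA device is assumed):

pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse", height=512, width=768).images[0]
image.save("astronaut.png")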
| 350
|
import sys

N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Returns the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Finds the thirteen adjacent digits of n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # Slide the window forward by one digit.
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
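A quick check of the digit-product helper (9 × 9 × 8 × 9 = 5832):

assert str_eval("9989") == 5832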
| 147
| 0
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
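A round-trip sketch (the key is illustrative; its determinant, 7, is coprime to 36, so the key is invertible modulo 36):

hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
ciphertext = hc.encrypt("HELP99")
assert hc.decrypt(ciphertext) == "HELP99"  # decryption inverts encryption exactly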
| 201
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
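For n = 4 and k = 2 the script prints the six 2-element combinations of {1, 2, 3, 4} in lexicographic order:

assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]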
| 201
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
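What the lazy pattern buys: importing the package stays cheap, and the heavy framework-specific module loads only when a listed name is first accessed (assuming transformers and torch are installed):

from transformers.models.wav2vec2 import Wav2Vec2Config  # triggers the real import here
config = Wav2Vec2Config()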
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 157
| 0
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file) -> List[str]:
    """Read one token per line from a plain-text vocabulary file."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer: one token per vocabulary line, whitespace-split input."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text: str, **kwargs):
        # ESM vocabularies are mostly single residues, so tokenization is a plain split
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self) -> dict:
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
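# Illustrative usage sketch (not part of the original file; the checkpoint id is
# real, the sequence is arbitrary). Because every vocabulary entry is registered
# as a no-split token, single-letter residues are split via the tokenizer trie:
#
#   tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   ids = tokenizer("MKTAYIAKQR")["input_ids"]  # [cls] + one id per residue + [eos]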
| 296
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two parallel Transformer2DModel blocks whose outputs are mixed during inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
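# Shape sketch for the dual-transformer mixing above (hedged illustration, not
# part of the original file): with the default condition_lengths [77, 257],
# `encoder_hidden_states` is expected to be (batch, 77 + 257, num_features);
# tokens [0:77] go to transformers[1] and tokens [77:334] to transformers[0],
# and the two residuals are blended with mix_ratio before re-adding the input.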
| 297
| 0
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """Test case checking that `accelerate launch` works with the stored default config."""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """Test case checking that `accelerate tpu-config` builds the expected gcloud command."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 358
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 276
| 0
|
"""
Project Euler Problem 10: https://projecteuler.net/problem=10

Summation of primes: find the sum of all the primes below two million.
"""


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes up to ``n`` using a sieve of Eratosthenes.

    >>> solution(10)
    17
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # mark every multiple of the prime i, starting at i*i, as composite
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n + 1):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
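# Note (added for clarity): the sieve above runs in O(n log log n) time and O(n)
# memory; for the default n = 2_000_000 it returns 142913828922, the published
# Project Euler #10 answer.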
| 200
|
"""A pure-Python implementation of the SHA-256 hash algorithm."""
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of a given bytestring."""

    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
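    # Worked example of the padding rule above (added for clarity): for an
    # 11-byte message, padding is b"\x80" plus 63 - (11 + 8) % 64 = 44 zero
    # bytes, and the 8-byte big-endian bit length (88) completes a 64-byte block.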
    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                a, b, c, d, e, f, g, h = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        # right-rotate a 32-bit value
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Test the SHA256 class against hashlib's reference implementation."""

    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    """Hash a string or the contents of a file from the command line."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 200
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 363
|
"""
Project Euler Problem 97: https://projecteuler.net/problem=97

Find the last ten digits of the non-Mersenne prime 28433 * 2**7830457 + 1.
"""


def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
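# Note (added for clarity): pow(2, 7830457, modulus) is modular exponentiation,
# so the ~2.4-million-digit power is never materialised; e.g. pow(2, 10, 1000)
# computes 1024 % 1000 == 24 without building 2**10 first.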
| 228
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 78
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
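# Note on the lazy-import pattern used above (added for clarity): _LazyModule
# replaces this module in sys.modules, so importing the package only reads
# _import_structure; accessing e.g. VisionEncoderDecoderModel then triggers the
# actual torch-dependent submodule import at attribute-access time.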
| 147
| 0
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
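# Why the boundary assertions above hold (added note): SiLU(x) = x * sigmoid(x),
# Mish(x) = x * tanh(softplus(x)) and GELU all vanish for large negative inputs
# and approach the identity for large positive ones, so f(-100) evaluates to 0.0
# in float32 while f(20) evaluates to exactly 20.0; f(-1) is small but nonzero.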
| 335
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
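# Shape walkthrough for the projections above (added note, default config
# values): image_embeddings (B, 768) -> clip_extra_context_tokens_proj ->
# (B, 4 * cross_attention_dim) -> reshape/permute -> (B, 4, cross_attention_dim);
# concatenated ahead of the projected text states (B, seq, cross_attention_dim)
# this yields (B, seq + 4, cross_attention_dim), matching the four "extra tokens
# of context" quoted from the unCLIP paper.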
| 335
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 108
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
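
# A minimal standalone sketch (not part of the test suite) of the call the
# integration test above verifies; "google/mobilebert-uncased" is the
# checkpoint the test itself uses, everything else is standard TF/transformers
# API.
if __name__ == "__main__":
    demo_model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
    demo_logits = demo_model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(demo_logits.shape)  # (1, 6, 30522): one prediction score per vocab token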
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> str:
'''simple docstring'''
lowercase = []
for line in lines:
lowercase = re.sub(R"""#.*""" , """""" , lowerCAmelCase__ ) # remove comments
if line:
filtered_lines.append(lowerCAmelCase__ )
lowercase = """\n""".join(lowerCAmelCase__ )
# Make a hash from all this code
lowercase = full_str.encode("""utf-8""" )
return shaaaa(lowerCAmelCase__ ).hexdigest()
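
# Illustrative only (hypothetical inputs): because comments and blank lines
# are stripped before hashing, these two "modules" get the same fingerprint.
# >>> _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])
# True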
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
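
# Illustrative only: one vision-encoder key traced through rename_key.
# >>> rename_key("img_encoder.layers.0.blocks.0.norm1.weight")
# 'vision_model.encoder.stages.0.layers.0.layer_norm1.weight'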
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            # (the target key layout below follows the standard HF q/k/v naming scheme)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require the same split-up treatment
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
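
# Example invocation (script and checkpoint names are placeholders, not real
# files):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc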
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
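
# Worked example (hypothetical small bound): for max_prime = 100 the loop
# visits the candidates 7, 19, 37, 61, 91; all but 91 = 7 * 13 are prime,
# so solution(100) == 4.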
if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. convert layer indices into the HF block/layer scheme
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # per-expert target key layout assumed from the HF Switch Transformers naming
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
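
# Illustrative only: the sort of gin lines the two regexes above pick up
# (values invented for the example).
#   NUM_ENCODER_LAYERS = 12
#   HEAD_DIM = 64
#   dense.MlpBlock.activations = ('gelu',)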
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
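
# A small usage demo (not in the original module): three points on the line
# x = y = z are collinear; nudging the last point off the line is not.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3)))  # False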
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
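
# Worked examples (verified by hand): 0xAC is 172 decimal, i.e. 10101100 in
# binary, and the sign is carried through.
# >>> hex_to_bin("AC")
# 10101100
# >>> hex_to_bin("-ac")
# -10101100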
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
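
# A minimal usage demo (coefficients are illustrative, not a designed filter):
# a first-order smoother y[n] = 0.5 * x[n] + 0.5 * y[n-1] stepped with a unit
# input converges toward 1.
if __name__ == "__main__":
    smoother = IIRFilter(1)
    smoother.set_coefficients([1.0, -0.5], [0.5, 0.0])
    print([round(smoother.process(1.0), 4) for _ in range(5)])
    # [0.5, 0.75, 0.875, 0.9375, 0.9688]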
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0,
        depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0,
        hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5,
        initializer_range=0.02, **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
if len(__UpperCamelCase ) < k or k < 0:
raise ValueError("""Invalid Input""" )
_lowerCAmelCase =_lowerCAmelCase =sum(array[:k] )
for i in range(len(__UpperCamelCase ) - k ):
_lowerCAmelCase =current_sum - array[i] + array[i + k]
_lowerCAmelCase =max(__UpperCamelCase , __UpperCamelCase )
return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
"""simple docstring"""
def __A ( ) -> list[list[int]]:
return [list(range(10_00 - i , -10_00 - i , -1)) for i in range(10_00)]
A = generate_large_matrix()
A = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __A ( a_ :list[list[int]]) -> None:
assert all(row == sorted(__A , reverse=__A) for row in grid)
assert all(list(__A) == sorted(__A , reverse=__A) for col in zip(*__A))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
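
# A quick standalone shape check (illustrative; values depend on the inputs):
# each timestep maps to an `embedding_dim`-wide sin/cos feature vector.
if __name__ == "__main__":
    demo_timesteps = jnp.arange(4, dtype=jnp.float32)
    demo_emb = get_sinusoidal_embeddings(demo_timesteps, embedding_dim=8)
    print(demo_emb.shape)  # (4, 8)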
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )
class TFRoFormerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
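# Background note (a sketch, not part of the test suite): rotary position
# embeddings rotate each (even, odd) feature pair of q and k by an angle that
# grows with the token position, so attention scores depend only on relative
# positions. In NumPy-style pseudocode, for per-position cos/sin tables:
#   q_rot[..., 0::2] = q[..., 0::2] * cos - q[..., 1::2] * sin
#   q_rot[..., 1::2] = q[..., 1::2] * cos + q[..., 0::2] * sin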
class Graph:
    def __init__(self):
        # dictionary mapping each vertex to its list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
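# A minimal iterative counterpart to Graph.dfs_recursive (sketch): an explicit
# stack replaces the call stack. Neighbours are pushed in reverse so they are
# printed in insertion order; assumes the same dict-of-lists adjacency format
# with vertices numbered from 0.
def dfs_iterative(vertex: dict) -> None:
    visited = [False] * len(vertex)
    stack = [0]
    while stack:
        node = stack.pop()
        if not visited[node]:
            visited[node] = True
            print(node, end=" ")
            stack.extend(reversed(vertex.get(node, [])))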
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """Placeholder object raised when the `note_seq` backend is unavailable."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
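# Example invocation (a sketch with hypothetical local paths; the script file
# name is also an assumption):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b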
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
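# Net effect: importing this package is cheap. The torch-backed modeling
# submodule is only imported when one of the names in `_import_structure` is
# first accessed, and the try/except above keeps the package importable even
# when torch is not installed.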
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
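# Note (sketch): calculate_heuristic above uses Manhattan distance, the usual
# choice for this 4-connected grid. For an 8-connected grid the standard
# drop-in replacement would be the Chebyshev distance, max(abs(dx), abs(dy)).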
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
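# Why the [0, 2, 1, 3] permutation (sketch of the reasoning): the two Swin
# implementations concatenate the four shifted feature maps of patch merging in
# a different order, so downsample reduction/norm weights have to be
# re-interleaved in groups of four when converting between the formats; each
# "reverse_" variant undoes the corresponding "correct_" variant.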
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
# assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    # Forward (explicit) Euler: y[k + 1] = y[k] + h * f(x[k], y[k])
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
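if __name__ == "__main__":
    # Quick usage sketch: dy/dx = y with y(0) = 1 should approach e ≈ 2.71828
    # at x = 1 as step_size shrinks (forward Euler is only first-order accurate).
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.70 with this step size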
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
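# Sanity check: solution(20) == 137846528820, the central binomial coefficient
# C(40, 20) — i.e. the number of lattice routes through a 20x20 grid.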
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL attends over recurrent memory rather than a fixed window
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
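# Usage sketch: TransfoXLConfig() reproduces the transfo-xl-wt103 defaults,
# and individual fields can be overridden as keyword arguments, e.g.
#   config = TransfoXLConfig(mem_len=800)   # config.mem_len == 800
# while reading config.max_position_embeddings just logs and returns -1,
# and writing it raises NotImplementedError by design.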
def solution(number_limit: int = 1000000) -> int:
    # Longest Collatz chain for a starting number below `number_limit`;
    # chain lengths are memoised in `counters`.
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, number_limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
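# Known result for the Project Euler 14 limit: solution(1000000) == 837799.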
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    less, equal, greater = _partition(items, pivot)
    count = len(equal)
    m = len(less)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
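if __name__ == "__main__":
    # Usage sketch: quick_select finds the k-th smallest element in expected
    # O(n) time; the median of an odd-length list is index len(items) // 2.
    items = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(items, len(items) // 2))  # 7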
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
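# Usage sketch (assumes Pillow is installed and "cat.png" exists locally):
#   from PIL import Image
#   processor = MobileViTImageProcessor()
#   inputs = processor(Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # -> torch.Size([1, 3, 256, 256]) with the defaults above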
"""simple docstring"""
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
UpperCamelCase : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def A ( snake_case :float , snake_case :str , snake_case :str ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCamelCase = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(snake_case )}'
)
raise ValueError(snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
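# Usage sketch (speeds are normalised through km/h as the pivot unit):
#   convert_speed(100, "km/h", "m/s")  # -> 27.778
#   convert_speed(100, "mph", "km/h")  # -> 160.934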
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt template, or return the input verbatim if it already is a prompt."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # direct evaluation: recomputes x**i for every term
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's scheme: one multiply-add per coefficient
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
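# Both prints show 79800.0: 5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000.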
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
__magic_name__: Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
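# Sketch: to recover actual shortest paths (not just distances), track a `nxt`
# matrix during relaxation. A standalone helper, assuming the same math.inf
# convention for missing edges as Graph above; follow nxt[i][j] hop by hop to
# walk the path from i to j.
def floyd_warshall_paths(w):
    n = len(w)
    dist = [row[:] for row in w]
    nxt = [[j if w[i][j] != math.inf else None for j in range(n)] for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    nxt[i][j] = nxt[i][k]
    return dist, nxt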
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
def __get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
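# Usage sketch: compute_bridges(__get_demo_graph(0)) should report the bridges
# {(2, 3), (2, 5), (3, 4)} (ordering may vary) — the triangle 0-1-2 and the
# 5-6-7-8 cycle contain none, since removing any of their edges keeps the
# component connected.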
def partition(m: int) -> int:
    # memo[n][k]: number of partitions of n using parts of size at most k + 1
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
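# Sanity check: partition(5) == 7, matching the seven integer partitions of 5
# (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).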
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Decrypt a Caesar cipher by picking the shift whose decryption has the
    lowest chi-squared statistic against English letter frequencies."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
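

if __name__ == "__main__":
    # A usage sketch: "uryyb jbeyq" is "hello world" Caesar-shifted by 13, so
    # the lowest chi-squared statistic should land on shift 13 (the statistic
    # can be unreliable on very short strings, hence a print, not an assert).
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("uryyb jbeyq")
    print(shift, chi_squared, decoded)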
| 75
| 1
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCamelCase = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 243
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 243
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
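

# A short usage sketch (illustrative; assumes `timm` and `torch` are installed
# and that "resnet18" is available in the local timm registry):
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     print([feature_map.shape for feature_map in outputs.feature_maps])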
| 20
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowercase : str = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
lowercase : Dict = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
lowercase : int = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" ,)
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 20
| 1
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
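    # Worked example: for a 5p target the table fills to [1, 1, 2, 2, 3, 4],
    # i.e. four ways to make 5p (5; 2+2+1; 2+1+1+1; 1+1+1+1+1).
    assert solution(5) == 4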
| 363
|
'''simple docstring'''
def solution() -> int:
    """Count the Sundays that fell on the first of the month from
    1 Jan 1901 to 31 Dec 2000 (Project Euler 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
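    # Independent cross-check with the standard library (a sketch; counts the
    # month-firsts falling on a Sunday over the same range):
    from datetime import date

    assert solution() == sum(
        1 for y in range(1901, 2001) for m in range(1, 13) if date(y, m, 1).weekday() == 6
    )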
| 25
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
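

# Usage sketch (illustrative agent name; with `None` the template is fetched
# from the default Hub dataset repo, while a string containing whitespace is
# treated as the literal prompt itself):
#
#     run_prompt = download_prompt(None, agent_name="my-agent", mode="run")
#     print(run_prompt[:80])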
| 322
|
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively explore every cell; exponential without memoization."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, with a memo table so each cell is solved once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and the next row."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
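    # Illustrative cross-check of the other variants (same answer expected; the
    # un-memoized recursion is exponential, the DP versions run in O(rows * cols)):
    mat = [[1, 1], [1, 1]]
    assert largest_square_area_in_matrix_top_down_approach(2, 2, mat) == 2
    assert largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, mat) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, mat) == 2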
| 322
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 352
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
| 277
| 0
|
"""simple docstring"""
A: Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A: List[str] = [{"type": "code", "content": INSTALL_CONTENT}]
A: Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 109
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A: int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A: Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
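
# Example invocation (paths are illustrative):
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base \
#         --config_path ./config.json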
| 109
| 1
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
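

# Worked example for the helper above: resizing a 480x640 image to a nominal
# 384x384 with keep_aspect_ratio=True and multiple=32 keeps the scale closer
# to 1 (384 / 480 = 0.8) for both sides, giving 0.8 * 480 = 384 and
# 0.8 * 640 = 512, so the returned size is (384, 512).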
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
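# A minimal usage sketch for the processor above (my example, not from the original
# file; "scene.jpg" is a placeholder image path):
#
#   from PIL import Image
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   inputs = processor.preprocess(Image.open("scene.jpg"), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # (1, 3, 384, 384)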
| 156
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 243
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True}, ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
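# A minimal usage sketch (my example, not from the original module): with a
# DatasetInfo fetched from the Hub, the filesystem above lets fsspec-style code
# list and read files from a dataset repository.
#
#   from huggingface_hub import HfApi
#   info = HfApi().dataset_info("squad")
#   fs = HfFileSystem(repo_info=info)
#   print(fs.ls(""))                 # top-level files in the dataset repo
#   with fs.open("README.md") as f:  # streams the file via hf_hub_url
#       print(f.read()[:100])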
| 243
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
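# A small sanity check for the rotations above (my addition, not part of the
# original module): a left-leaning chain 3 -> 2 -> 1 is rebalanced by one right
# rotation, so node 2 becomes the subtree root with 1 and 3 as children.
def _rotation_demo() -> None:
    root = MyNode(3)
    child = MyNode(2)
    child.set_left(MyNode(1))
    child.set_height(2)
    root.set_left(child)
    root.set_height(3)
    new_root = right_rotation(root)
    assert new_root.get_data() == 2
    assert new_root.get_left().get_data() == 1
    assert new_root.get_right().get_data() == 3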
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 357
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    """Approximate the area under the curve with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solves
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
snake_case_ = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
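# A convergence cross-check (my addition, not in the original): because the summand
# takes abs(), the loop estimates the area between the curve and the x axis, i.e.
# the integral of |x^3 + x^2| over [-5, 5], which is 938/3 ≈ 312.67, not the signed
# integral 250/3. The printed values should approach ~312.67 as the step count grows.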
| 216
| 0
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Mock requests.Response with the minimal attributes the download manager reads."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def lowercase_ ( _A : Union[str, Any] , _A : int , _A : str ):
"""simple docstring"""
import requests
monkeypatch.setattr(_snake_case , "request" , _snake_case )
lowerCamelCase__ : str = URL
if issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Optional[int] = url
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Union[str, Any] = [url]
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : int = {"""train""": url}
lowerCamelCase__ : Tuple = """dummy"""
lowerCamelCase__ : Dict = """downloads"""
lowerCamelCase__ : List[Any] = tmp_path
lowerCamelCase__ : Tuple = DownloadConfig(
cache_dir=os.path.join(_snake_case , _snake_case ) , use_etag=_snake_case , )
lowerCamelCase__ : Any = DownloadManager(dataset_name=_snake_case , download_config=_snake_case )
lowerCamelCase__ : Tuple = dl_manager.download(_snake_case )
lowerCamelCase__ : Optional[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ : Optional[int] = [downloaded_paths]
lowerCamelCase__ : Union[str, Any] = [urls]
elif isinstance(_snake_case , _snake_case ):
assert "train" in downloaded_paths.keys()
lowerCamelCase__ : Dict = downloaded_paths.values()
lowerCamelCase__ : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_snake_case , _snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCamelCase__ : Dict = Path(_snake_case )
lowerCamelCase__ : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCamelCase__ : List[str] = downloaded_path.read_text()
assert content == CONTENT
lowerCamelCase__ : Optional[int] = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
lowerCamelCase__ : Any = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def lowercase_ ( _A : Optional[int] , _A : int , _A : Dict ):
"""simple docstring"""
lowerCamelCase__ : Dict = str(_snake_case )
if issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Optional[int] = filename
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : int = [filename]
elif issubclass(_snake_case , _snake_case ):
lowerCamelCase__ : Union[str, Any] = {"""train""": filename}
lowerCamelCase__ : List[Any] = """dummy"""
lowerCamelCase__ : List[str] = xz_file.parent
lowerCamelCase__ : Dict = """extracted"""
lowerCamelCase__ : Optional[int] = DownloadConfig(
cache_dir=_snake_case , use_etag=_snake_case , )
lowerCamelCase__ : Tuple = DownloadManager(dataset_name=_snake_case , download_config=_snake_case )
lowerCamelCase__ : Dict = dl_manager.extract(_snake_case )
lowerCamelCase__ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_snake_case , _snake_case ):
lowerCamelCase__ : int = [extracted_paths]
lowerCamelCase__ : Optional[Any] = [paths]
elif isinstance(_snake_case , _snake_case ):
assert "train" in extracted_paths.keys()
lowerCamelCase__ : str = extracted_paths.values()
lowerCamelCase__ : Union[str, Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_snake_case , _snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCamelCase__ : Tuple = Path(_snake_case )
lowerCamelCase__ : List[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_snake_case , etag=_snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCamelCase__ : Union[str, Any] = extracted_path.read_text()
lowerCamelCase__ : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 184
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ))
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ), )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
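# Example invocation (a sketch; the script name and flag values are illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports run_glue.py as a module, rewrites sys.argv, and spawns
# one process per TPU core via torch_xla's xmp.spawn.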
| 25
| 0
|
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
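# A usage sketch (my example): inside a test, the fixture above yields a directory
# containing __dummy_dataset1__.py; it assumes a datasets version that accepts a
# loading-script directory as the path argument.
#
#   def test_load_dummy(dataset_loading_script_dir):
#       from datasets import load_dataset
#       ds = load_dataset(dataset_loading_script_dir, split="train")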
| 289
|
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the wrapped optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
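# A minimal usage sketch (my example, plain PyTorch objects): the wrapper is normally
# created for you by Accelerator.prepare, but it can be built directly.
#
#   import torch
#   model = torch.nn.Linear(2, 2)
#   opt = torch.optim.SGD(model.parameters(), lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#   wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=False)
#   wrapped.step()  # forwards to sched.step() unconditionally in this mode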
| 178
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
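# Example invocation (a sketch; the checkpoint and config paths are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf_ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./lxmert_pytorch.bin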
| 277
| 0
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 352
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
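# A short usage sketch (my example; the checkpoint name is the public M2M100 model):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("Hello world", return_tensors="pt")
#   # input_ids start with the __en__ language token and end with </s>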
| 151
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str) -> MaskFormerConfig:
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
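# A quick illustration (my example, not from the original script): the helper above
# keys everything off substrings in the model name, so "maskformer-swin-tiny-ade"
# matches the "ade" branch and selects the 150 ADE20K classes.
#
#   cfg = get_maskformer_config("maskformer-swin-tiny-ade")
#   assert cfg.num_labels == 150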
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
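
# Quick sanity sketch (hedged, not part of the original script): every element of
# the list returned above is an (original_key, hf_key) string pair; the first
# entry maps the patch-embedding projection weight.
#
#   keys = create_rename_keys(get_maskformer_config("maskformer-swin-tiny-ade"))
#   assert keys[0][0] == "backbone.patch_embed.proj.weight"
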
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
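
# Hedged usage sketch (not part of the original script): a typical invocation of
# the CLI above; the script filename and checkpoint path are illustrative.
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade
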
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
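
# Illustrative property of betas_for_alpha_bar (hedged sketch, not part of the
# module): it returns one beta per diffusion step, each capped at max_beta.
#
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,) and float(betas.max()) <= 0.999
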
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    DDIMInverseScheduler is the reverse scheduler of denoising diffusion implicit models (DDIM).
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_zero: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, **kwargs):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
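
# Hedged usage sketch (not part of this module): driving the inverse scheduler
# over latents; `unet` and `latents` stand in for an assumed noise-prediction
# model and its working tensor.
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t)
#       latents = scheduler.step(noise_pred, t, latents).prev_sample
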
"""Image processor class for BLIP."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `1 / 255` to map pixel values into `[0, 1]`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess an image or batch of images for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
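
# Hedged usage sketch (not part of this module): preprocessing a single PIL
# image; `pil_image` is assumed to exist.
#
#   processor = BlipImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   # batch["pixel_values"].shape -> (1, 3, 384, 384) with the default size
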
"""TensorFlow general model utilities."""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
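
# Illustrative behaviour (hedged, not part of the module): with fully static
# shapes, shape_list simply returns Python ints.
#
#   shape_list(tf.ones((2, 3)))  # -> [2, 3]
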
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
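
# Illustrative behaviour (hedged, not part of the module): flatten mirrors
# torch.flatten semantics.
#
#   flatten(tf.ones((2, 3, 4)), start_dim=1)  # -> shape (2, 12)
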
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64_512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
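
# Illustrative behaviour of expand_1d (hedged, not part of the module): 1D
# tensors anywhere in a nested structure gain a trailing axis; other entries
# pass through unchanged.
#
#   out = expand_1d({"labels": tf.ones((8,)), "mask": tf.ones((8, 1))})
#   # out["labels"].shape -> (8, 1); out["mask"] is untouched
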
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
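
# Hedged usage sketch (not part of this module): the fast tokenizer appends EOS
# to a single sequence, as build_inputs_with_special_tokens above shows.
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("Hello world").input_ids  # ids[-1] == tok.eos_token_id
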
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if get_height(node.get_left()) - get_height(node.get_right()) == 2:  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if data < left_child.get_data():  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
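
# Illustrative invariant (hedged, not part of the original file): repeated
# insert_node calls keep the tree height-balanced.
#
#   root = None
#   for value in [3, 1, 4, 5, 9, 2, 6]:
#       root = insert_node(root, value)
#   assert abs(get_height(root.get_left()) - get_height(root.get_right())) <= 1
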
class AVLtree:
    """
    A self-balancing binary search tree.
    """

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
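
# Hedged note (not part of the original test file): these tests are normally
# collected by pytest; the path below is illustrative.
#
#   python -m pytest tests/models/albert/test_modeling_flax_albert.py -q
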
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__a = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : str ):
_snake_case = ConvNextVaModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def lowercase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Dict ):
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowercase ( self : Dict ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowercase ( self : int ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowercase ( self : int ):
pass
def lowercase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case = True
if model_class.__name__ in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]:
continue
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
_snake_case = model(**_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Dict ):
if not self.model_tester.is_training:
return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def lowercase ( self : Optional[Any] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase ( self : Optional[int] ):
        def check_hidden_states_output(inputs_dict : Any , config : Union[str, Any] , model_class : Optional[int] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowercase ( self : List[str] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowercase ( self : str ):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Optional[Any]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : List[Any] ):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowercase ( self : Optional[Any] ):
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
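# Example of the recommended replacement import (a minimal sketch; the decorated
# function is retried with a progressively smaller batch size when it raises a
# CUDA out-of-memory error):
#
#   from accelerate import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # run training with `batch_size`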
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode ,use_xla ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args ,**kwargs ):
            return func(*args ,**kwargs )

        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args ,**kwargs ):
            return func(*args ,**kwargs )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
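# A minimal usage sketch (the `forward_fn` and `model` names are illustrative):
# the decorator returns either the plain eager function or a `tf.function`
# graph-compiled version, depending on the benchmark arguments.
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward_fn():
#       return model(input_ids, training=False)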
def random_input_ids( batch_size ,sequence_length ,vocab_size ):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values ,shape=(batch_size, sequence_length) ,dtype=tf.int32 )
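# For instance (illustrative), random_input_ids(2, 8, 100) returns an int32
# tf.Tensor of shape (2, 8) whose values are drawn uniformly from [0, 99].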
class a ( Benchmark ):
    args : TensorFlowBenchmarkArguments
    configs : PretrainedConfig
    framework : str = "TensorFlow"
    @property
    def framework_version( self : Union[str, Any] ):
        return tf.__version__

    def _inference_speed( self : List[str] , model_name : str , batch_size : int , sequence_length : int ):
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self : Optional[int] , model_name : str , batch_size : int , sequence_length : int ):
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self : Optional[Any] , model_name : str , batch_size : int , sequence_length : int ):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self : str , model_name : str , batch_size : int , sequence_length : int ):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self : List[Any] , model_name : str , batch_size : int , sequence_length : int ):
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(input_ids , training=False )

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self : List[str] , model_name : str , batch_size : int , sequence_length : int ):
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self : Union[str, Any] , func : Any ):
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
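# Illustration of the min-over-repeats rule applied above (values are made up):
#
#   runtimes = [0.012, 0.011, 0.015]  # seconds for 10 calls each
#   per_call = min(runtimes) / 10.0   # -> 0.0011 s: the least-noise estimate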
    def _measure_memory( self : List[Any] , func : Callable[[], None] ):
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    trace = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        memory = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
from collections.abc import Callable


def bisection(function: Callable[[float], float] , a: float , b: float ) -> float:
    '''simple docstring'''
    start: float = a
    end: float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until the interval shrinks below 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float ) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
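# Quick sanity check (illustrative): the real root of x**3 - 2*x - 5 is
# approximately 2.0945515, so the search on [1, 1000] should land within
# the 1e-7 interval tolerance of it:
#
#   assert abs(bisection(f, 1, 1000) - 2.0945515) < 1e-6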
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( SchedulerCommonTest ):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def A_ ( self : Dict , **UpperCAmelCase_ : Union[str, Any] ):
        config = {
            'num_train_timesteps': 201,
            'sigma_min': 0.002,
            'sigma_max': 80.0,
        }
        config.update(**UpperCAmelCase_ )
        return config
    def A_ ( self : Tuple ):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def A_ ( self : List[str] ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def A_ ( self : Any ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def A_ ( self : List[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3
    def A_ ( self : Optional[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            residual = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def A_ ( self : Tuple ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def A_ ( self : List[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def A_ ( self : Optional[Any] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def A__ ( self ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def A__ ( self , **kwargs ) -> int:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def A__ ( self , **kwargs ) -> int:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def A__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def A__ ( self ) -> Optional[int]:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=True ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=True ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
UpperCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase_ )
UpperCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase_ )
UpperCamelCase = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
UpperCamelCase = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def A__ ( self ) -> List[str]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def A__ ( self ) -> Tuple:
"""simple docstring"""
pass
    def A__ ( self ) -> Dict:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def A__ ( self ) -> Dict:
        """simple docstring"""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""trim_offsets"""] , trim_offsets )
    def A__ ( self ) -> Any:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args( ):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )
    w, h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
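# Example invocation (illustrative; assumes this script is saved as e.g.
# `run_sd_inference.py` and that `<model_dir>` holds a diffusers-format
# Stable Diffusion checkpoint, optionally with a quantized `best_model.pt`):
#
#   python run_sd_inference.py -m <model_dir> -c "robotic cat with wings" -n 4 -s 42 -ci 0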
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _lowerCAmelCase ( Pipeline ):
    def _a (self , truncation=None , tokenize_kwargs=None , return_tensors=None , **lowercase ):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            tokenize_kwargs["""truncation"""] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["""return_tensors"""] = return_tensors
        return preprocess_params, {}, postprocess_params
    def _a (self , inputs , **tokenize_kwargs ):
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _a (self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def _a (self , model_outputs , return_tensors=False ):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__(self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
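# A minimal usage sketch of this pipeline (the model name is illustrative):
#
#   from transformers import pipeline
#
#   extractor = pipeline(task="feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")  # nested list: [batch, tokens, hidden_size]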
'''simple docstring'''
def a ( density: float , bulk_modulus: float ):
    '''simple docstring'''
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
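# Worked example (approximate values, for illustration): water has a bulk
# modulus of about 2.15e9 Pa and a density of about 998 kg/m^3, so the
# Newton-Laplace formula above gives (2.15e9 / 998) ** 0.5 ≈ 1467.7 m/s,
# close to the commonly quoted ~1480 m/s for sound in water.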
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue (line , indent ):
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , line ) is not None
def find_code_in_diffusers (object_name ):
    """simple docstring"""
    parts = object_name.split(""".""" )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(r'<FILL\s+[^>]*>')
def get_indent (code ):
    """simple docstring"""
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
def blackify (code ):
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent (filename , overwrite=False ):
    """simple docstring"""
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(F'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = """\n""".join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting {filename}.' )
        with open(filename , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(lines )
    return diffs
def check_copies (overwrite: bool = False ):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , """**/*.py""" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = """\n""".join(diffs )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
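# For illustration, the kind of marker this script enforces looks like the
# following (the object path and rename pattern here are hypothetical, not
# taken from the diffusers codebase):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock.forward with BasicTransformerBlock->MyBlock
#   def forward(self, hidden_states):
#       ...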
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend (line ):
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init ():
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object (name , backend_name ):
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects.")
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
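
A quick illustration of the generator above (a sketch, not part of the script): `create_dummy_object` picks a template from the casing of the name.

print(create_dummy_object("FLAX_WEIGHTS_NAME", '["flax"]'))  # all-uppercase -> DUMMY_CONSTANT
print(create_dummy_object("load_model", '["torch"]'))        # all-lowercase -> DUMMY_FUNCTION
print(create_dummy_object("UNet2DModel", '["torch"]'))       # mixed case -> DUMMY_CLASS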
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
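# A minimal sanity check for the solver above (illustrative, not part of the original file):
# solve([[1, 1], [1, -1]], [[3], [1]]) returns [[2.0], [1.0]],
# i.e. x = 2, y = 1 solves the system x + y = 3, x - y = 1.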
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit a polynomial of degree len(y_points) - 1 through (1, y_points[0]), (2, y_points[1]), ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
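# Worked example mirroring the Project Euler 101 statement (illustrative):
# interpolate([1, 8, 27]) fits 6*n**2 - 11*n + 6 through u(n) = n**3 at n = 1, 2, 3,
# so the returned function yields 58 at n = 4 -- the first incorrect term (FIT) --
# whereas the true sequence continues with 64.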
def question_function(variable: int) -> int:
    """The generating function u(n) from the Project Euler 101 statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials OP(1..order, n)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"input_ids": [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e")
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358
|
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
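
A small sanity check (a sketch; the expected value comes from the Project Euler 71 statement):

assert solution(numerator=3, denominator=7, limit=8) == 2  # for d <= 8, the fraction left of 3/7 is 2/5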
| 85
| 0
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
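
The pattern these tests exercise, in isolation (a sketch reusing the `add_two` tool defined above):

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result)  # 5 -- evaluate returns the value of the last assignment
print(state)   # {'x': 3, 'y': 5}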
| 181
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 169
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 144
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
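
A hypothetical instantiation (defaults are those in the signature above):

config = Swin2SRConfig(upscale=4)
print(config.embed_dim, config.num_layers)  # 180 6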
| 144
| 1
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 135
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
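
A hypothetical invocation (the one-second random waveform is illustrative; 16 kHz mono audio assumed):

import numpy as np
extractor = Speech2TextFeatureExtractor()
audio = np.random.randn(16000).astype(np.float32)
features = extractor(audio, sampling_rate=16000, return_tensors="np")
print(features["input_features"].shape)  # (1, num_frames, 80) log-mel filterbank features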
| 135
| 1
|
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of input_list in ascending order; [] for an empty list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
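
For example (a sketch; `mode` is the name restored above):

print(mode([2, 2, 3, 3, 4]))  # [2, 3] -- both values occur twice
print(mode([]))               # []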
| 358
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Any = "\nHuman: <<task>>\n\nAssistant: "
_lowerCAmelCase : str = "huggingface-tools/default-prompts"
_lowerCAmelCase : Union[str, Any] = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int="run" ) -> int:
'''simple docstring'''
if prompt_or_repo_id is None:
_UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , SCREAMING_SNAKE_CASE__ ) is not None:
return prompt_or_repo_id
_UpperCAmelCase : Dict = cached_file(
SCREAMING_SNAKE_CASE__ , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(SCREAMING_SNAKE_CASE__ , "r" , encoding="utf-8" ) as f:
return f.read()
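
A hypothetical call (the agent name is illustrative):

prompt = download_prompt(None, agent_name="my-agent", mode="run")
# None falls back to DEFAULT_PROMPTS_REPO; a string containing whitespace is
# treated as a literal prompt and returned unchanged instead of being downloaded.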
| 202
| 0
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''

# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')


DUMMY_CONSTANT = '''
{0} = None
'''

DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''

DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract the backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'''"{b}"''' for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, f'''dummy_{short_names.get(backend, backend)}_objects.py''')
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '''
                    '__init__ has new objects.')
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'''diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '''
                    'to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 322
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ['''input_features''', '''attention_mask''']
def __init__( self : str ,_a : Optional[Any]=80 ,_a : Tuple=1_6000 ,_a : Tuple=80 ,_a : Optional[Any]=0.0 ,_a : Optional[int]=True ,_a : Optional[int]=True ,_a : str=True ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(feature_size=_a ,sampling_rate=_a ,padding_value=_a ,**_a )
_a : str = num_mel_bins
_a : Dict = do_ceptral_normalize
_a : Dict = normalize_means
_a : List[Any] = normalize_vars
_a : Optional[Any] = True
def __lowercase ( self : Dict ,_a : np.ndarray ,):
'''simple docstring'''
_a : int = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
_a : Any = torch.from_numpy(_a ).unsqueeze(0 )
_a : Tuple = ta_kaldi.fbank(_a ,num_mel_bins=self.num_mel_bins ,sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
    def utterance_cmvn(x: np.ndarray , input_length: int , normalize_means: Optional[bool] = True , normalize_vars: Optional[bool] = True , padding_value: float = 0.0 ) -> np.ndarray:
        '''simple docstring'''
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize(self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(input_features , lengths )
        ]
    def __call__(
        self ,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,
        padding: Union[bool, str, PaddingStrategy] = False ,
        max_length: Optional[int] = None ,
        truncation: bool = False ,
        pad_to_multiple_of: Optional[int] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
        sampling_rate: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
        **kwargs ,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform ) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features} )
        padded_inputs = self.pad(
            encoded_inputs ,
            padding=padding ,
            max_length=max_length ,
            truncation=truncation ,
            pad_to_multiple_of=pad_to_multiple_of ,
            return_attention_mask=return_attention_mask ,
            **kwargs ,
        )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , list ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
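
# Editor's usage sketch (not part of the original module; the checkpoint name
# below is an assumption): extract log-mel filter-bank features from one
# second of silence and inspect the padded feature shape.
#
#   import numpy as np
#   extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="pt")
#   print(inputs["input_features"].shape)  # (batch, frames, num_mel_bins)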
| 364
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes ) -> bytes:
    """simple docstring"""
    if len(string_aa ) != 3_2:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int ) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess(message: bytes ) -> bytes:
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
    return bit_string
def get_block_words(bit_string: bytes ) -> Generator[list, None, None]:
    """simple docstring"""
    if len(bit_string ) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_1_2 ):
        block = bit_string[pos : pos + 5_1_2]
        block_words = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words
def not_aa(i: int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa(a: int , b: int ) -> int:
    """simple docstring"""
    return (a + b) % 2**3_2
def left_rotate_aa(i: int , shift: int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def md5_me(message: bytes ) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76
    shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(6_4 ):
            if i <= 1_5:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 3_1:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 1_6
            elif i <= 4_7:
                f = b ^ c ^ d
                g = (3 * i + 5) % 1_6
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 1_6
            f = (f + a + added_consts[i] + block_words[g]) % 2**3_2
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
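
    # Editor's sketch (not part of the original file): cross-check this
    # pure-Python MD5 against the reference implementation in hashlib; the
    # digests should agree byte-for-byte.
    import hashlib

    for message in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
        assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")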
| 5
| 0
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta(velocity: float ) -> float:
    '''simple docstring'''
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c

def gamma(velocity: float ) -> float:
    '''simple docstring'''
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity: float ) -> np.ndarray:
    '''simple docstring'''
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity: float , event: np.ndarray | None = None ) -> np.ndarray:
    '''simple docstring'''
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
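
    # Editor's sketch (not part of the original file): a Lorentz boost must
    # preserve the Minkowski metric, i.e. L.T @ eta @ L == eta up to
    # floating-point error.
    eta = np.diag([1.0, -1.0, -1.0, -1.0])
    boost = transformation_matrix(0.5 * c)
    assert np.allclose(boost.T @ eta @ boost, eta)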
| 345
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list , ndigits: int = 3 ) -> list:
    '''simple docstring'''
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization(data: list , ndigits: int = 3 ) -> list:
    '''simple docstring'''
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
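
if __name__ == "__main__":
    # Editor's sketch (not part of the original file): min-max rescaling maps
    # the data onto [0, 1], while z-score standardization yields mean 0 and
    # (sample) standard deviation 1.
    data = [2.0, 4.0, 6.0, 8.0]
    print(normalization(data))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(data))  # [-1.162, -0.387, 0.387, 1.162]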
| 85
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = "timm_backbone"

    def __init__(self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 364
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig(datasets.BuilderConfig ):
    """simple docstring"""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 1_0000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self ) -> None:
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self ) -> dict:
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder ):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self ) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ) -> List[datasets.SplitGenerator]:
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables(self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
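
# Editor's usage sketch (not part of the original module): this builder backs
# `load_dataset("csv", ...)`; keyword arguments that match CsvConfig fields
# are forwarded to pandas.read_csv.
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")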
| 277
| 0
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
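
# Editor's note (not part of the original script): this is meant to run on a
# schedule (e.g. a GitHub Actions cron job) with a token in the environment:
#
#   GITHUB_TOKEN=<token> python stale.py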
| 144
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float , capacitance: float ) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
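
    # Editor's sketch (not part of the original file): f = 1 / (2*pi*sqrt(L*C)),
    # so a 10 mH inductor with a 100 nF capacitor resonates near 5.03 kHz.
    print(resonant_frequency(inductance=10e-3, capacitance=100e-9))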
| 144
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["""ConditionalDetrFeatureExtractor"""]
    _import_structure["image_processing_conditional_detr"] = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 55
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup

def stock_price(symbol: str = "AAPL" ) -> str:
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 55
| 1
|
import numpy as np
def power_iteration(input_matrix: np.ndarray , vector: np.ndarray , error_tol: float = 1E-12 , max_iterations: int = 1_00 , ):
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
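
    # Editor's sketch (not part of the original file): the dominant eigenvalue
    # of the symmetric matrix [[2, 1], [1, 2]] is 3, with eigenvector [1, 1].
    value, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    assert abs(value - 3.0) <= 1e-6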
| 92
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor(self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default(self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_save_load_pretrained_additional_features(self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )

        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_feature_extractor(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        raw_speech = floats_list((3, 1_000) )

        input_feat_extract = feature_extractor(raw_speech , return_tensors="np" )
        input_processor = processor(audios=raw_speech , return_tensors="np" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_tokenizer_decode(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )

        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 202
| 0
|
"""simple docstring"""
import argparse

from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM

def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path , config_name , flax_dump_folder_path ):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
A = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A = 'TransientGlobalSelfAttention'
else:
raise ValueError(
"""Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`"""
""" attribute with a value from [\'local\', \'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
A = f'layers_{str(_UpperCAmelCase )}'
# Self-Attention
A = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
A = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
A = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
A = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
A = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
A = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
A = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
A = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
A = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
A = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
A = flax_model.params['encoder']['block'][str(_UpperCAmelCase )]['layer']
A = tax_attention_key
A = tax_attention_out
A = tax_attention_query
A = tax_attention_value
A = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A = tax_global_layer_norm
if split_mlp_wi:
A = tax_mlp_wi_a
A = tax_mlp_wi_a
else:
A = tax_mlp_wi
A = tax_mlp_wo
A = tax_mlp_layer_norm
A = flax_model_encoder_layer_block
# Only for layer 0:
A = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
A = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
A = tax_encoder_global_rel_embedding
# Assigning
A = tax_model['target']['encoder']['encoder_norm']['scale']
A = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A = f'layers_{str(_UpperCAmelCase )}'
# Self-Attention
A = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
A = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
A = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
A = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
A = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
A = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
A = tax_enc_dec_attention_module['key']['kernel']
A = tax_enc_dec_attention_module['out']['kernel']
A = tax_enc_dec_attention_module['query']['kernel']
A = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
A = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
A = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
A = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
A = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
A = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
A = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
A = flax_model.params['decoder']['block'][str(_UpperCAmelCase )]['layer']
A = tax_attention_key
A = tax_attention_out
A = tax_attention_query
A = tax_attention_value
A = tax_pre_attention_layer_norm
A = tax_enc_dec_attention_key
A = tax_enc_dec_attention_out
A = tax_enc_dec_attention_query
A = tax_enc_dec_attention_value
A = tax_cross_layer_norm
if split_mlp_wi:
A = tax_mlp_wi_a
A = tax_mlp_wi_a
else:
A = tax_mlp_wi
A = tax_mlp_wo
A = txa_mlp_layer_norm
A = flax_model_decoder_layer_block
# Decoder Normalization
A = tax_model['target']['decoder']['decoder_norm']['scale']
A = txa_decoder_norm
# Only for layer 0:
A = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
A = tax_decoder_rel_embedding
# Token Embeddings
A = tax_model['target']['token_embedder']['embedding']
A = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
A = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
    print("""T5X Model was successfully converted!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 351
|
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME

@pytest.fixture
def dataset_loading_script_code():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE

@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'{script_name}.py'
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
| 77
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = 'wavlm'

    def __init__(
        self ,
        vocab_size=32 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout=0.1 ,
        activation_dropout=0.1 ,
        attention_dropout=0.1 ,
        feat_proj_dropout=0.0 ,
        final_dropout=0.1 ,
        layerdrop=0.1 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        feat_extract_norm="group" ,
        feat_extract_activation="gelu" ,
        conv_dim=(512, 512, 512, 512, 512, 512, 512) ,
        conv_stride=(5, 2, 2, 2, 2, 2, 2) ,
        conv_kernel=(10, 3, 3, 3, 3, 2, 2) ,
        conv_bias=False ,
        num_conv_pos_embeddings=128 ,
        num_conv_pos_embedding_groups=16 ,
        num_buckets=320 ,
        max_bucket_distance=800 ,
        do_stable_layer_norm=False ,
        apply_spec_augment=True ,
        mask_time_prob=0.05 ,
        mask_time_length=10 ,
        mask_time_min_masks=2 ,
        mask_feature_prob=0.0 ,
        mask_feature_length=10 ,
        num_codevectors_per_group=320 ,
        num_codevector_groups=2 ,
        contrastive_logits_temperature=0.1 ,
        num_negatives=100 ,
        codevector_dim=256 ,
        proj_codevector_dim=256 ,
        diversity_loss_weight=0.1 ,
        ctc_loss_reduction="mean" ,
        ctc_zero_infinity=False ,
        use_weighted_layer_sum=False ,
        classifier_proj_size=256 ,
        tdnn_dim=(512, 512, 512, 512, 1500) ,
        tdnn_kernel=(5, 3, 3, 1, 1) ,
        tdnn_dilation=(1, 2, 3, 1, 1) ,
        xvector_output_dim=512 ,
        num_ctc_classes=80 ,
        pad_token_id=0 ,
        bos_token_id=1 ,
        eos_token_id=2 ,
        add_adapter=False ,
        adapter_kernel_size=3 ,
        adapter_stride=2 ,
        num_adapter_layers=3 ,
        output_hidden_size=None ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self ) -> int:
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 313
|
def actual_power(a: int , b: int ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power(a: int , b: int ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
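
    # Editor's sketch (not part of the original file): computing the half-power
    # once and squaring it gives the classic O(log b) exponentiation by
    # squaring; actual_power above recomputes the half, costing O(b)
    # multiplications. Assumes a non-negative exponent.
    def fast_power(a: float, b: int) -> float:
        if b == 0:
            return 1
        half = fast_power(a, b // 2)
        return half * half if b % 2 == 0 else a * half * half

    assert fast_power(2, 10) == 1_024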
| 5
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 318
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 318
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 73
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline(Pipeline ):
    """simple docstring"""

    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet

    def __call__(self , audios: Union[np.ndarray, bytes, str] , **kwargs ):
        return super().__call__(audios , **kwargs )

    def _sanitize_parameters(self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        if isinstance(audio , str ):
            if audio.startswith('http://' ) or audio.startswith('https://' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , 'rb' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('We expect a numpy ndarray as input' )
        if len(audio.shape ) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward(self , model_inputs ):
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self , model_outputs ):
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
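
# Editor's usage sketch (not part of the original module; the checkpoint name
# below is an assumption): score candidate labels against an audio clip.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   print(classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"]))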
| 277
| 0
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/diffusers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname , version , pattern ) -> None:
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples(version ) -> None:
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update(version , patch=False ) -> None:
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list() -> None:
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False ) -> None:
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(f'Updating version to {version}.' )
    global_version_update(version , patch=patch )
def post_release_work() -> None:
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(f'Updating version to {version}.' )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
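# Editor's sketch: the helpers above depend on module-level REPLACE_PATTERNS and
# REPLACE_FILES globals defined near the top of the original script (not shown in
# this excerpt). The definitions below are an assumed illustration of their shape,
# not the project's actual values; names are prefixed to avoid clobbering the real ones.
import re

_EXAMPLE_REPLACE_PATTERNS = {
    # pattern name -> (regex with one capture group for the version, replacement containing the literal "VERSION")
    "init": (re.compile(r'^__version__\s*=\s*"([^"]+)"', re.MULTILINE), '__version__ = "VERSION"'),
    "examples": (re.compile(r'^check_min_version\("([^"]+)"\)', re.MULTILINE), 'check_min_version("VERSION")'),
}
_EXAMPLE_REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",  # assumed path
    "setup": "setup.py",
}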
| 288
|
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given as (magnitude, angle) into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium (net moment ~ 0)."""
    # The moment of each force about the origin is the 2D cross product
    # location x force, which numpy returns as a scalar z-component.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
__lowerCAmelCase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__lowerCAmelCase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
__lowerCAmelCase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__lowerCAmelCase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
__lowerCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
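# Editor's sketch: for 2D vectors, cross(location, force) returns the scalar
# z-moment location_x * force_y - location_y * force_x, so in_static_equilibrium
# simply checks that the net torque about the origin is (nearly) zero.
# A hand-checked toy case using the functions above:
unbalanced_forces = array([[0, -10.0], [0, 10.0]])
unbalanced_points = array([[-1.0, 0], [1.0, 0]])
# moments are (-1)(-10) = 10 and (1)(10) = 10, so the net torque is 20
assert not in_static_equilibrium(unbalanced_forces, unbalanced_points)

balanced_forces = array([[0, -10.0], [0, -10.0], [0, 20.0]])
balanced_points = array([[-1.0, 0], [1.0, 0], [0, 0]])
# moments are 10, -10 and 0, so the net torque is 0
assert in_static_equilibrium(balanced_forces, balanced_points)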
| 288
| 1
|
'''simple docstring'''

import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
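# Editor's note: the scraper above is tied to Google Scholar's current markup
# (the "gs_ri"/"gs_fl" CSS classes and the citation link being the third anchor),
# which can change or be rate-limited at any time. A slightly more defensive
# variant under the same assumptions:
def get_citation_safe(base_url: str, params: dict) -> str:
    response = requests.get(base_url, params=params, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, "html.parser")
    result = soup.find("div", attrs={"class": "gs_ri"})
    if result is None:
        raise RuntimeError("No result block found; the page layout may have changed.")
    anchors = result.find("div", attrs={"class": "gs_fl"}).find_all("a")
    if len(anchors) < 3:
        raise RuntimeError("Unexpected number of links in the result footer.")
    return anchors[2].get_text()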
| 55
|
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
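# Editor's sketch: a minimal end-to-end use of the Accelerator re-exported above,
# with a toy model and synthetic data. Everything below is a local placeholder,
# not part of this module's public API.
import torch
from torch.utils.data import DataLoader, TensorDataset


def _toy_training_loop():
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loader = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 1)), batch_size=8)
    # prepare() moves model/optimizer/dataloader to the right device(s)
    model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
    for inputs, targets in loader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)  # replaces loss.backward()
        optimizer.step()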
| 55
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
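# Editor's sketch: rename_keys is pure string rewriting over checkpoint key names,
# so it can be sanity-checked on a toy OrderedDict with dummy tensors (the key
# below is illustrative, not from a real GLPN checkpoint):
def _demo_rename_keys():
    toy = OrderedDict({"module.encoder.patch_embed1.proj.weight": torch.zeros(1)})
    renamed = rename_keys(toy)
    assert "glpn.encoder.patch_embeddings.0.proj.weight" in renamed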
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 366
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """simple docstring"""

    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """simple docstring"""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
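# Editor's sketch: how the pieces above are typically wired together. The paths,
# tokenizer and args object are placeholders supplied by the real training script.
def _build_loader(data_path, tokenizer, args):
    labels = get_mmimdb_labels()
    dataset = JsonlDataset(data_path, tokenizer, get_image_transforms(), labels, args.max_seq_length)
    return torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)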
| 117
| 0
|
def twos_complement(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    '''simple docstring'''
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
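# Editor's sketch: the "l0" branch above is the hard-concrete style
# stretch-and-clamp: scores -> sigmoid -> affine stretch to [l, r] -> clamp to
# [0, 1]. A self-contained illustration in plain torch (no emmental needed):
def _l0_mask(scores, l=-0.1, r=1.1):
    s = torch.sigmoid(scores)
    s_bar = s * (r - l) + l
    return s_bar.clamp(min=0.0, max=1.0)

# _l0_mask(torch.tensor([-10.0, 0.0, 10.0])) is approximately [0.0, 0.5, 1.0]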
| 77
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 361
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
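# Editor's sketch: sieve() is an incremental (dictionary-based) prime sieve, so
# it can be spot-checked by slicing off a few primes:
from itertools import islice

assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]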
| 313
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
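# Editor's sketch: _LazyModule defers the heavy imports above until an attribute
# is first accessed, roughly via module-level __getattr__ (PEP 562). A
# stripped-down stand-in for the mechanism, not the real implementation:
import importlib


class _ToyLazyModule:
    def __init__(self, import_structure):
        self._import_structure = import_structure  # {submodule: [exported names]}

    def __getattr__(self, name):
        for submodule, names in self._import_structure.items():
            if name in names:
                module = importlib.import_module(f".{submodule}", __package__)
                return getattr(module, name)
        raise AttributeError(name)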
| 318
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
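# Editor's sketch: attribute_map aliases the generic config names onto CTRL's
# historical ones, so both spellings read the same underlying value:
def _demo_attribute_map():
    config = CTRLConfig()
    assert config.hidden_size == config.n_embd == 1280
    assert config.num_hidden_layers == config.n_layer == 48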
| 318
| 1
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
UpperCamelCase_: Any = parent
UpperCamelCase_: Union[str, Any] = batch_size
UpperCamelCase_: List[str] = seq_length
UpperCamelCase_: Union[str, Any] = is_training
UpperCamelCase_: List[str] = use_input_mask
UpperCamelCase_: Union[str, Any] = use_token_type_ids
UpperCamelCase_: int = use_labels
UpperCamelCase_: List[Any] = vocab_size
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: Tuple = num_hidden_layers
UpperCamelCase_: Union[str, Any] = num_attention_heads
UpperCamelCase_: Any = intermediate_multiple_size
UpperCamelCase_: str = hidden_act
UpperCamelCase_: Optional[Any] = hidden_dropout
UpperCamelCase_: Optional[int] = attention_dropout
UpperCamelCase_: Dict = weight_tying
UpperCamelCase_: List[Any] = max_position_embeddings
UpperCamelCase_: Dict = type_vocab_size
UpperCamelCase_: str = type_sequence_label_size
UpperCamelCase_: List[str] = initializer_range
UpperCamelCase_: Optional[Any] = num_labels
UpperCamelCase_: List[Any] = num_choices
UpperCamelCase_: List[str] = scope
def _a ( self ):
UpperCamelCase_: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_: Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase_: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_: str = None
if self.use_labels:
UpperCamelCase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_: Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def _a ( self ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase_: Any = True
return config, input_ids, input_mask, token_labels
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Optional[Any] = GPTNeoXJapaneseModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: str = True
UpperCamelCase_: Optional[Any] = GPTNeoXJapaneseModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: int = GPTNeoXJapaneseForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[str] = True
UpperCamelCase_: Any = GPTNeoXJapaneseForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
UpperCamelCase_: Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
UpperCamelCase_: List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_: Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_: List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_: List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_: Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_: Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase )
UpperCamelCase_: int = output_from_no_past['hidden_states'][0]
UpperCamelCase_: Optional[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['hidden_states'][0]
# select random slice
UpperCamelCase_: str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_: Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_: List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
def _a ( self ):
UpperCamelCase_: Tuple = self.prepare_config_and_inputs()
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[int] = config_and_inputs
UpperCamelCase_: Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : int =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a : int =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a : Any =(
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a : Any =False
a : Union[str, Any] =False
a : Dict =False
a : Union[str, Any] =False
def _a ( self ):
UpperCamelCase_: Optional[Any] = GPTNeoXJapaneseModelTester(self )
UpperCamelCase_: str = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=3_7 )
def _a ( self ):
self.config_tester.run_common_tests()
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_: Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
@slow
def _a ( self ):
UpperCamelCase_: List[str] = 'abeja/gpt-neox-japanese-2.7b'
UpperCamelCase_: Optional[Any] = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
UpperCamelCase_: List[str] = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
UpperCamelCase_: Dict = GPTNeoXJapaneseTokenizer.from_pretrained(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowerCamelCase )
UpperCamelCase_: Tuple = []
for prompt in prompts:
UpperCamelCase_: Tuple = tokenizer(_lowerCamelCase , return_tensors='pt' ).input_ids
UpperCamelCase_: int = model.generate(_lowerCamelCase , max_length=5_0 )
UpperCamelCase_: Tuple = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 292
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
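# Editor's sketch: each term satisfies a(n) = a(n-1)^2 - a(n-1) + 1 (the
# lower * upper + 1 step above), giving 2, 3, 7, 43, 1807, ...
assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]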
| 292
| 1
|
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
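# Editor's sketch: a quick in-place check of the recursive sort above
# (the sort mutates its argument rather than returning a new list).
_demo = [3, 1, 2]
rec_insertion_sort(_demo, len(_demo))
assert _demo == [1, 2, 3]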
| 288
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int:
import requests
monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase )
_snake_case = URL
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = url
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [url]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': url}
_snake_case = '''dummy'''
_snake_case = '''downloads'''
_snake_case = tmp_path
_snake_case = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.download(__lowerCamelCase )
_snake_case = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [downloaded_paths]
_snake_case = [urls]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
_snake_case = downloaded_paths.values()
_snake_case = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case = Path(__lowerCamelCase )
_snake_case = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case = downloaded_path.read_text()
assert content == CONTENT
_snake_case = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
_snake_case = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int:
_snake_case = str(__lowerCamelCase )
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = filename
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [filename]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': filename}
_snake_case = '''dummy'''
_snake_case = xz_file.parent
_snake_case = '''extracted'''
_snake_case = DownloadConfig(
cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.extract(__lowerCamelCase )
_snake_case = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [extracted_paths]
_snake_case = [paths]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in extracted_paths.keys()
_snake_case = extracted_paths.values()
_snake_case = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case = Path(__lowerCamelCase )
_snake_case = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case = extracted_path.read_text()
_snake_case = text_file.read_text()
assert extracted_file_content == expected_file_content
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__lowerCamelCase , start=1 ):
_snake_case = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]:
_snake_case = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 288
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self : Tuple , A : int ) ->Union[str, Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
lowerCamelCase__ : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowercase_ )
def __lowerCamelCase ( self : Optional[Any] ) ->Tuple:
lowerCamelCase__ : Optional[Any] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : Tuple = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : List[Any] ) ->List[str]:
lowerCamelCase__ : str = '''sgugger/tiny-distilbert-classification'''
lowerCamelCase__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , )
lowerCamelCase__ : Any = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Tuple ) ->Dict:
lowerCamelCase__ : List[str] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , torchscript=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : List[str] = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __lowerCamelCase ( self : Optional[int] ) ->List[str]:
lowerCamelCase__ : List[Any] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , fpaa=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : List[Any] = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Optional[Any] ) ->Union[str, Any]:
lowerCamelCase__ : Dict = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
# set architectures equal to `None`
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : Optional[Any] = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCamelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Tuple ) ->Optional[int]:
lowerCamelCase__ : List[str] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : List[Any] = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __lowerCamelCase ( self : Optional[Any] ) ->Optional[int]:
lowerCamelCase__ : Any = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase_ , multi_process=lowercase_ , )
lowerCamelCase__ : int = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : List[str] ) ->int:
lowerCamelCase__ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained(lowercase_ )
lowerCamelCase__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : Dict = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCamelCase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
lowerCamelCase__ : str = '''sshleifer/tinier_bart'''
lowerCamelCase__ : Optional[Any] = AutoConfig.from_pretrained(lowercase_ )
lowerCamelCase__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : Any = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCamelCase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Any ) ->Dict:
lowerCamelCase__ : int = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Any = AutoConfig.from_pretrained(lowercase_ )
lowerCamelCase__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : Optional[int] = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCamelCase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : List[Any] ) ->Union[str, Any]:
lowerCamelCase__ : int = '''sshleifer/tinier_bart'''
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowercase_ )
lowerCamelCase__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCamelCase__ : Union[str, Any] = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCamelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : Tuple ) ->Dict:
lowerCamelCase__ : Dict = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase_ , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(lowercase_ , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(lowercase_ , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(lowercase_ , '''train_time.csv''' ) , env_info_csv_file=os.path.join(lowercase_ , '''env.csv''' ) , multi_process=lowercase_ , )
lowerCamelCase__ : Dict = PyTorchBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , '''env.csv''' ) ).exists() )
def __lowerCamelCase ( self : Tuple ) ->int:
lowerCamelCase__ : List[Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(A : List[Any] ):
self.assertTrue(hasattr(lowercase_ , '''sequential''' ) )
self.assertTrue(hasattr(lowercase_ , '''cumulative''' ) )
self.assertTrue(hasattr(lowercase_ , '''current''' ) )
self.assertTrue(hasattr(lowercase_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , '''log.txt''' ) , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , multi_process=lowercase_ , )
lowerCamelCase__ : Union[str, Any] = PyTorchBenchmark(lowercase_ )
lowerCamelCase__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase_ , '''log.txt''' ) ).exists() )
| 364
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_A : Any = logging.get_logger(__name__)
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
def run_func(UpperCAmelCase ):
@wraps(UpperCAmelCase )
def run_in_eager_mode(*UpperCAmelCase , **UpperCAmelCase ):
return func(*UpperCAmelCase , **UpperCAmelCase )
@wraps(UpperCAmelCase )
@tf.function(experimental_compile=UpperCAmelCase )
def run_in_graph_mode(*UpperCAmelCase , **UpperCAmelCase ):
return func(*UpperCAmelCase , **UpperCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> ["tf.Tensor"]:
"""simple docstring"""
lowerCamelCase__ : List[Any] = random.Random()
lowerCamelCase__ : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : TensorFlowBenchmarkArguments
_UpperCAmelCase : PretrainedConfig
_UpperCAmelCase : str = "TensorFlow"
@property
def __lowerCamelCase ( self : int ) ->Optional[int]:
return tf.__version__
def __lowerCamelCase ( self : Optional[int] , A : str , A : int , A : int ) ->float:
# initialize GPU on separate process
lowerCamelCase__ : Dict = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : int = self._prepare_inference_func(A , A , A )
return self._measure_speed(_inference )
def __lowerCamelCase ( self : str , A : str , A : int , A : int ) ->float:
lowerCamelCase__ : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : List[Any] = self._prepare_train_func(A , A , A )
return self._measure_speed(_train )
def __lowerCamelCase ( self : int , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A )
lowerCamelCase__ : int = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : str = self._prepare_inference_func(A , A , A )
return self._measure_memory(_inference )
def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->[Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A )
lowerCamelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowerCamelCase__ : str = self._prepare_train_func(A , A , A )
return self._measure_memory(_train )
def __lowerCamelCase ( self : Dict , A : str , A : int , A : int ) ->Callable[[], None]:
lowerCamelCase__ : Tuple = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
lowerCamelCase__ : Tuple = (
hasattr(A , '''architectures''' )
and isinstance(config.architectures , A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCamelCase__ : List[Any] = __import__('''transformers''' , fromlist=[model_class] )
lowerCamelCase__ : int = getattr(A , A )
lowerCamelCase__ : int = model_cls(A )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
lowerCamelCase__ : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
lowerCamelCase__ : Tuple = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size
lowerCamelCase__ : Optional[Any] = random_input_ids(A , A , A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(A , decoder_input_ids=A , training=A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(A , training=A )
lowerCamelCase__ : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __lowerCamelCase ( self : List[str] , A : str , A : int , A : int ) ->Callable[[], None]:
lowerCamelCase__ : Tuple = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
lowerCamelCase__ : Optional[int] = (
hasattr(A , '''architectures''' )
and isinstance(config.architectures , A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCamelCase__ : Any = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCamelCase__ : List[str] = __import__('''transformers''' , fromlist=[model_class] )
lowerCamelCase__ : Optional[int] = getattr(A , A )
lowerCamelCase__ : Optional[Any] = model_cls(A )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
lowerCamelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
lowerCamelCase__ : Optional[int] = config.vocab_size if hasattr(A , '''vocab_size''' ) else config.encoder.vocab_size
lowerCamelCase__ : Dict = random_input_ids(A , A , A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCamelCase__ : int = model(A , decoder_input_ids=A , labels=A , training=A )[0]
lowerCamelCase__ : List[Any] = tf.gradients(A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCamelCase__ : Optional[int] = model(A , labels=A , training=A )[0]
lowerCamelCase__ : List[str] = tf.gradients(A , model.trainable_variables )
return gradients
lowerCamelCase__ : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __lowerCamelCase ( self : Tuple , A : Any ) ->float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCamelCase__ : Optional[Any] = timeit.repeat(
A , repeat=self.args.repeat , number=1_0 , )
return min(A ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F"Doesn't fit on GPU. {e}" )
                return "N/A"
def __lowerCamelCase ( self : List[Any] , A : Callable[[], None] ) ->[Memory, MemorySummary]:
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
lowerCamelCase__ : Union[str, Any] = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
lowerCamelCase__ : Union[str, Any] = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
lowerCamelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowerCamelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(A )
lowerCamelCase__ : List[Any] = meminfo.used
lowerCamelCase__ : Union[str, Any] = Memory(A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
lowerCamelCase__ : Tuple = None
else:
lowerCamelCase__ : Dict = measure_peak_memory_cpu(A )
lowerCamelCase__ : Optional[Any] = Memory(A ) if isinstance(A , A ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowerCamelCase__ : Union[str, Any] = stop_memory_tracing(A )
if memory is None:
lowerCamelCase__ : Dict = summary.total
else:
lowerCamelCase__ : Optional[int] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
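# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the benchmark above
# times a zero-argument callable with `timeit.repeat` and keeps the minimum
# over repeats, since the minimum is the least noisy runtime estimate.
# `fn` below is a hypothetical stand-in for the `_inference`/`_train` closures.
import timeit

def measure_min_runtime(fn, repeat: int = 3, number: int = 10) -> float:
    """Return the best per-call runtime of `fn` over `repeat` trials."""
    runtimes = timeit.repeat(fn, repeat=repeat, number=number)
    return min(runtimes) / number

if __name__ == "__main__":
    print(measure_min_runtime(lambda: sum(range(10_000))))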
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCamelCase_ = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : Any=False , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Any=6.0 , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Any=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : List[Any]="fp4" , __UpperCAmelCase : List[str]=False , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
        _A = load_in_8bit
        _A = load_in_4bit
_A = llm_inta_threshold
_A = llm_inta_skip_modules
_A = llm_inta_enable_fpaa_cpu_offload
_A = llm_inta_has_fpaa_weight
_A = bnb_abit_quant_type
_A = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_A = torch.floataa
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = getattr(__UpperCAmelCase , __UpperCAmelCase )
elif isinstance(__UpperCAmelCase , torch.dtype ):
_A = bnb_abit_compute_dtype
else:
raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" )
self.post_init()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
if not isinstance(self.llm_inta_threshold , __UpperCAmelCase ):
raise ValueError("llm_int8_threshold must be a float" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __UpperCAmelCase ):
raise ValueError("llm_int8_skip_modules must be a list of strings" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __UpperCAmelCase ):
raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" )
if not isinstance(self.llm_inta_has_fpaa_weight , __UpperCAmelCase ):
raise ValueError("llm_int8_has_fp16_weight must be a boolean" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" )
if not isinstance(self.bnb_abit_quant_type , __UpperCAmelCase ):
raise ValueError("bnb_4bit_quant_type must be a string" )
if not isinstance(self.bnb_abit_use_double_quant , __UpperCAmelCase ):
raise ValueError("bnb_4bit_use_double_quant must be a boolean" )
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse(
            "0.39.0" ):
raise ValueError(
"4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        return self.load_in_8bit or self.load_in_4bit
def lowerCAmelCase ( self : int ):
'''simple docstring'''
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def lowerCAmelCase ( cls : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , **__UpperCAmelCase : Dict ):
'''simple docstring'''
_A = cls(**__UpperCAmelCase )
_A = []
for key, value in kwargs.items():
if hasattr(__UpperCAmelCase , __UpperCAmelCase ):
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
to_remove.append(__UpperCAmelCase )
for key in to_remove:
kwargs.pop(__UpperCAmelCase , __UpperCAmelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, os.PathLike] ):
'''simple docstring'''
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
_A = self.to_dict()
_A = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + "\n"
writer.write(__UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = str(output["bnb_4bit_compute_dtype"] ).split("." )[1]
return output
def __repr__( self : List[Any] ):
'''simple docstring'''
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : bool = True ):
'''simple docstring'''
if use_diff is True:
_A = self.to_diff_dict()
else:
_A = self.to_dict()
return json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + "\n"
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.to_dict()
# get the default config dict
_A = BitsAndBytesConfig().to_dict()
_A = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_A = value
return serializable_config_dict
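# Illustrative sketch (not part of the original file): `to_diff_dict` above
# serializes only the fields that differ from a freshly constructed default
# instance. A minimal standalone analogue, assuming a dataclass-style config
# with a no-argument constructor (names below are hypothetical):
import json
from dataclasses import dataclass, asdict

@dataclass
class TinyConfig:
    threshold: float = 6.0
    quant_type: str = "fp4"

def to_diff_dict(cfg: TinyConfig) -> dict:
    defaults = asdict(TinyConfig())  # default value of every field
    return {k: v for k, v in asdict(cfg).items() if v != defaults[k]}

print(json.dumps(to_diff_dict(TinyConfig(quant_type="nf4")), indent=2))
# -> {"quant_type": "nf4"}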
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[Any] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
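# Illustrative sketch (not part of the original file): `_LazyModule` defers
# heavy imports until an attribute is first touched. The same effect can be
# achieved in a package's __init__.py with a module-level __getattr__
# (PEP 562); the attribute-to-submodule mapping below is hypothetical.
import importlib

_LAZY_ATTRS = {
    "IBertModel": ".modeling_ibert",
    "IBertConfig": ".configuration_ibert",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")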
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = """pix2struct_text_model"""
lowerCAmelCase_ : str = ["""past_key_values"""]
lowerCAmelCase_ : Dict = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , _UpperCAmelCase : Dict=5_02_44 , _UpperCAmelCase : Tuple=7_68 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : Dict=20_48 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Any=1_28 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : str=1E-6 , _UpperCAmelCase : List[str]=1.0 , _UpperCAmelCase : str="gelu_new" , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : str , ):
"""simple docstring"""
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = d_kv
UpperCAmelCase__ = d_ff
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = relative_attention_num_buckets
UpperCAmelCase__ = relative_attention_max_distance
UpperCAmelCase__ = dropout_rate
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = eos_token_id
UpperCAmelCase__ = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase__ = dense_act_fn
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , is_decoder=_UpperCAmelCase , **_UpperCAmelCase , )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : int ):
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
UpperCAmelCase__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """pix2struct_vision_model"""
def __init__( self : Any , _UpperCAmelCase : List[Any]=7_68 , _UpperCAmelCase : Optional[int]=7_68 , _UpperCAmelCase : Dict=20_48 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Dict="gelu_new" , _UpperCAmelCase : List[Any]=1E-6 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Union[str, Any]=1E-10 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Optional[int]=40_96 , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Dict=1_28 , **_UpperCAmelCase : int , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = patch_embed_hidden_size
UpperCAmelCase__ = d_ff
UpperCAmelCase__ = dropout_rate
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = dense_act_fn
UpperCAmelCase__ = seq_len
UpperCAmelCase__ = relative_attention_num_buckets
UpperCAmelCase__ = relative_attention_max_distance
UpperCAmelCase__ = d_kv
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
UpperCAmelCase__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : str = """pix2struct"""
lowerCAmelCase_ : Union[str, Any] = True
def __init__( self : int , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : List[str]=True , **_UpperCAmelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
if text_config is None:
UpperCAmelCase__ = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
UpperCAmelCase__ = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
UpperCAmelCase__ = PixaStructTextConfig(**_UpperCAmelCase )
UpperCAmelCase__ = PixaStructVisionConfig(**_UpperCAmelCase )
UpperCAmelCase__ = self.text_config.decoder_start_token_id
UpperCAmelCase__ = self.text_config.pad_token_id
UpperCAmelCase__ = self.text_config.eos_token_id
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = self.initializer_range
UpperCAmelCase__ = self.initializer_range
UpperCAmelCase__ = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , _UpperCAmelCase : PixaStructTextConfig , _UpperCAmelCase : PixaStructVisionConfig , **_UpperCAmelCase : Any ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.text_config.to_dict()
UpperCAmelCase__ = self.vision_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
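# Illustrative sketch (not part of the original file): the composite config
# above wraps a text config and a vision config and mirrors the generation
# token ids from the text side. A minimal standalone analogue (all names
# hypothetical):
class TextCfg:
    def __init__(self, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0):
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

class VisionCfg:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

class ComposedCfg:
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = TextCfg(**(text_config or {}))
        self.vision_config = VisionCfg(**(vision_config or {}))
        # lift shared generation ids from the text sub-config, as done above
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

cfg = ComposedCfg(text_config={"eos_token_id": 2})
assert cfg.eos_token_id == 2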
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCAmelCase_ = logging.get_logger(__name__)
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCAmelCase__ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
        # Parse it and check the field "sagemaker_mpi_enabled".
UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ )
if not mpi_options.get("""sagemaker_mpi_enabled""" , SCREAMING_SNAKE_CASE__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : str = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _UpperCAmelCase , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
UpperCAmelCase__ = torch.device("""cpu""" )
UpperCAmelCase__ = 0
elif is_sagemaker_model_parallel_available():
UpperCAmelCase__ = smp.local_rank()
UpperCAmelCase__ = torch.device("""cuda""" , _UpperCAmelCase )
UpperCAmelCase__ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
UpperCAmelCase__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank )
UpperCAmelCase__ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCAmelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCAmelCase__ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank )
UpperCAmelCase__ = 1
if device.type == "cuda":
torch.cuda.set_device(_UpperCAmelCase )
return device
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return False
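# Illustrative sketch (not part of the original file): the availability check
# above parses JSON blobs that SageMaker places in environment variables and
# treats any malformed value as "feature off". Minimal standalone version:
import json
import os

def env_json_flag(var_name: str, field: str) -> bool:
    """Return the boolean `field` of the JSON env var, or False on any error."""
    raw = os.getenv(var_name, "{}")
    try:
        data = json.loads(raw)
    except json.JSONDecodeError:
        return False
    return isinstance(data, dict) and bool(data.get(field, False))

os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
print(env_json_flag("SM_FRAMEWORK_PARAMS", "sagemaker_mpi_enabled"))  # True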
"""simple docstring"""
from manim import *
class UpperCAmelCase_ ( _a):
def _UpperCAmelCase ( self ) -> int:
lowercase__ : str = Rectangle(height=0.5 , width=0.5 )
lowercase__ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase__ : List[str] = [mem.copy() for i in range(6 )]
lowercase__ : int = [mem.copy() for i in range(6 )]
lowercase__ : int = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : Optional[int] = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : List[str] = VGroup(a , a ).arrange(a , buff=0 )
lowercase__ : Union[str, Any] = Text('CPU' , font_size=2_4 )
lowercase__ : Union[str, Any] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
lowercase__ : Union[str, Any] = [mem.copy() for i in range(4 )]
lowercase__ : List[Any] = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : Tuple = Text('GPU' , font_size=2_4 )
lowercase__ : Optional[int] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
lowercase__ : int = [mem.copy() for i in range(6 )]
lowercase__ : List[str] = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : int = Text('Model' , font_size=2_4 )
lowercase__ : Optional[Any] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
lowercase__ : Dict = []
for i, rect in enumerate(a ):
rect.set_stroke(a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowercase__ : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a , buff=0.0 )
self.add(a )
cpu_targs.append(a )
lowercase__ : Any = [mem.copy() for i in range(6 )]
lowercase__ : Optional[Any] = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : Optional[int] = Text('Loaded Checkpoint' , font_size=2_4 )
lowercase__ : Dict = Group(a , a ).arrange(a , aligned_edge=a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowercase__ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ : Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
lowercase__ : List[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowercase__ : List[str] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a ) , Write(a ) )
self.play(Write(a , run_time=1 ) , Create(a , run_time=1 ) )
lowercase__ : str = []
lowercase__ : Any = []
for i, rect in enumerate(a ):
lowercase__ : Optional[int] = fill.copy().set_fill(a , opacity=0.7 )
target.move_to(a )
first_animations.append(GrowFromCenter(a , run_time=1 ) )
lowercase__ : Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a , run_time=1.5 ) )
self.play(*a )
self.play(*a )
self.wait()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCamelCase_ = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_ ( _a : Dict , _a : Any , _a : Optional[int] , _a : Any , _a : Optional[Any] ):
'''simple docstring'''
for attribute in key.split(""".""" ):
UpperCAmelCase_ : Optional[int] = getattr(_a , _a )
if weight_type is not None:
UpperCAmelCase_ : Dict = getattr(_a , _a ).shape
else:
UpperCAmelCase_ : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase_ : Any = value
elif weight_type == "weight_g":
UpperCAmelCase_ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase_ : Tuple = value
elif weight_type == "bias":
UpperCAmelCase_ : int = value
else:
UpperCAmelCase_ : Dict = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCamelCase_ ( _a : Any , _a : str ):
'''simple docstring'''
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Dict = fairseq_model.state_dict()
UpperCAmelCase_ : Optional[Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase_ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase_ : Any = True
if "*" in mapped_key:
UpperCAmelCase_ : Any = name.split(_a )[0].split(""".""" )[-2]
UpperCAmelCase_ : List[str] = mapped_key.replace("""*""" , _a )
if "weight_g" in name:
UpperCAmelCase_ : Optional[int] = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase_ : List[Any] = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase_ : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ : int = """weight"""
else:
UpperCAmelCase_ : int = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCamelCase_ ( _a : Optional[Any] , _a : str , _a : Optional[int] , _a : Dict , _a : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase_ : str = name.split(""".""" )
UpperCAmelCase_ : Optional[Any] = int(items[0] )
UpperCAmelCase_ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase_ : List[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase_ : List[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase_ : Dict = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase_ : Dict = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_a )
@torch.no_grad()
def lowerCamelCase_ ( _a : Optional[Any] , _a : List[str] , _a : Dict=None ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = torch.load(_a )
UpperCAmelCase_ : Tuple = WavLMConfigOrig(checkpoint["""cfg"""] )
UpperCAmelCase_ : Tuple = WavLMOrig(_a )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
UpperCAmelCase_ : Tuple = WavLMConfig.from_pretrained(_a )
else:
UpperCAmelCase_ : Optional[int] = WavLMConfig()
UpperCAmelCase_ : Optional[Any] = WavLMModel(_a )
recursively_load_weights(_a , _a )
hf_wavlm.save_pretrained(_a )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCamelCase_ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
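# Illustrative sketch (not part of the original file): the MAPPING above uses
# "*" as a placeholder for the encoder layer index. Recovering the index from
# a fairseq-style key and substituting it works roughly like this:
MAPPING_EXCERPT = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def rename_key(name):
    for key, mapped in MAPPING_EXCERPT.items():
        if key in name:
            # "encoder.layers.3.self_attn.k_proj" -> layer index "3"
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped.replace("*", layer_index)
    return None

print(rename_key("encoder.layers.3.self_attn.k_proj"))
# -> encoder.layers.3.attention.k_proj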
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase_ = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
UpperCamelCase_ = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
UpperCamelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
def A__ ( self: Any ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] ,)
def A__ ( self: List[str] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[Any]=None ) -> int:
return {
"matthews_correlation": float(matthews_corrcoef(lowerCamelCase_ ,lowerCamelCase_ ,sample_weight=lowerCamelCase_ ) ),
}
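# Illustrative sketch (not part of the original file): for binary labels the
# Matthews correlation coefficient has a closed form over confusion-matrix
# counts, which the sklearn call above evaluates for us:
#     MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
from math import sqrt
from sklearn.metrics import matthews_corrcoef

y_true = [1, 1, 1, 0, 0, 0]
y_pred = [1, 1, 0, 0, 0, 1]
tp = sum(t == p == 1 for t, p in zip(y_true, y_pred))        # 2
tn = sum(t == p == 0 for t, p in zip(y_true, y_pred))        # 2
fp = sum(t == 0 and p == 1 for t, p in zip(y_true, y_pred))  # 1
fn = sum(t == 1 and p == 0 for t, p in zip(y_true, y_pred))  # 1
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert abs(mcc - matthews_corrcoef(y_true, y_pred)) < 1e-12
print(round(mcc, 4))  # 0.3333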
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def A__ ( UpperCamelCase ):
if isinstance(UpperCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _UpperCAmelCase :
def lowerCamelCase ( self :str , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any] ):
pass
def lowerCamelCase ( self :List[str] ):
pass
def lowerCamelCase ( self :Optional[Any] ):
pass
def lowerCamelCase ( self :Dict , __UpperCamelCase :np.ndarray , __UpperCamelCase :np.ndarray , __UpperCamelCase :float ):
A = np.abs((a - b) ).max()
self.assertLessEqual(__UpperCamelCase , __UpperCamelCase , f"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Dict , __UpperCamelCase :List[Any]=None , **__UpperCamelCase :Any ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
A = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCamelCase ( self :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :Dict , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any]=None , **__UpperCamelCase :Union[str, Any] ):
A, A = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
A = {"vision_model": vision_model, "text_model": text_model}
A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any]=None , **__UpperCamelCase :str ):
A, A = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
A = {"vision_model": vision_model, "text_model": text_model}
A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
A = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
A = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
A = after_output[0]
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1e-3 )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[int] , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :Dict , __UpperCamelCase :str=None , **__UpperCamelCase :List[str] ):
A, A = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
A = {"vision_model": vision_model, "text_model": text_model}
A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
A = model(
input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase )
A = output.vision_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A = to_atuple(vision_model.config.image_size )
A = to_atuple(vision_model.config.patch_size )
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A = output.text_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[Any] , __UpperCamelCase :Dict , __UpperCamelCase :Tuple ):
pt_model.to(__UpperCamelCase )
pt_model.eval()
# prepare inputs
A = inputs_dict
A = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
A = pt_model(**__UpperCamelCase ).to_tuple()
A = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCamelCase )
A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
A = fx_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCamelCase )
A = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase )
pt_model_loaded.to(__UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
A = pt_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output_loaded.numpy() , 4e-2 )
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[int] ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
A = VisionTextDualEncoderModel(__UpperCamelCase )
A = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase )
A = fx_state
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Any , __UpperCamelCase :List[Any] , __UpperCamelCase :Union[str, Any] ):
A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
A = VisionTextDualEncoderModel(__UpperCamelCase )
A = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
A = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params )
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCamelCase )
@is_pt_flax_cross_test
def lowerCamelCase ( self :List[Any] ):
A = self.prepare_config_and_inputs()
A = config_inputs_dict.pop("vision_config" )
A = config_inputs_dict.pop("text_config" )
A = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.check_equivalence_flax_to_pt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@slow
def lowerCamelCase ( self :Dict ):
A, A = self.get_pretrained_model_and_inputs()
A = model_a(**__UpperCamelCase )
A = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCamelCase )
A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
A = model_a(**__UpperCamelCase )
A = after_outputs[0]
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1e-5 )
@require_flax
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
def lowerCamelCase ( self :Optional[int] ):
A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
A = 13
A = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCamelCase ( self :Any , __UpperCamelCase :Dict , __UpperCamelCase :List[Any] ):
A = FlaxViTModel(__UpperCamelCase )
A = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def lowerCamelCase ( self :Optional[Any] ):
A = FlaxViTModelTester(self )
A = FlaxBertModelTester(self )
A = vit_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A, A = vision_config_and_inputs
A, A, A, A = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
def lowerCamelCase ( self :List[Any] ):
A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
A = 13
A = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A = random_attention_mask([batch_size, 4] )
A = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] ):
A = FlaxCLIPVisionModel(__UpperCamelCase )
A = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def lowerCamelCase ( self :Optional[int] ):
A = FlaxCLIPVisionModelTester(self )
A = FlaxBertModelTester(self )
A = clip_model_tester.prepare_config_and_inputs()
A = bert_model_tester.prepare_config_and_inputs()
A, A = vision_config_and_inputs
A, A, A, A = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self :Any ):
A = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
A = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" )
A = model(**__UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1e-3 ) )
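# Illustrative sketch (not part of the original file): the cross-framework
# checks above compare outputs by max absolute difference against a tolerance,
# the standard way to assert numerical equivalence between backends:
import numpy as np

def assert_max_diff(a, b, tol):
    diff = np.abs(np.asarray(a) - np.asarray(b)).max()
    if diff >= tol:
        raise AssertionError(f"max |a - b| = {diff} exceeds tolerance {tol}")

assert_max_diff(np.ones(3), np.ones(3) + 1e-6, tol=1e-3)  # passes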
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( UpperCamelCase ):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase ):
A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
A = s_dict.pop(UpperCamelCase )
def A__ ( UpperCamelCase ):
A, A = emb.weight.shape
A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A = emb.weight.data
return lin_layer
def A__ ( UpperCamelCase , UpperCamelCase ):
A = torch.load(UpperCamelCase , map_location="cpu" )
A = mam_aaa["args"]
A = mam_aaa["model"]
A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
A = state_dict["decoder.embed_tokens.weight"].shape[0]
A = args.share_decoder_input_output_embed
A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
A = SpeechaTextForConditionalGeneration(UpperCamelCase )
A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
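# Illustrative sketch (not part of the original file): `make_linear_from_emb`
# above turns a token-embedding matrix into a bias-free LM head that shares
# its weights, the usual input/output embedding tying:
import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin = nn.Linear(emb_size, vocab_size, bias=False)
    lin.weight.data = emb.weight.data  # share the same storage: tied weights
    return lin

emb = nn.Embedding(100, 16)
head = make_linear_from_emb(emb)
assert head.weight.data_ptr() == emb.weight.data_ptr()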
import copy
import re
class __lowercase :
"""simple docstring"""
_UpperCAmelCase = """hp"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
@classmethod
def UpperCamelCase__ ( cls , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = prefix
SCREAMING_SNAKE_CASE_ : List[str] = defaults
cls.build_naming_info()
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
if len(lowerCAmelCase__ ) == 0:
return ""
SCREAMING_SNAKE_CASE_ : int = None
if any(char.isdigit() for char in word ):
raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCAmelCase__ ) + 1 ):
SCREAMING_SNAKE_CASE_ : Any = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE_ : List[Any] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
while integer != 0:
SCREAMING_SNAKE_CASE_ : Any = chr(ord('A' ) + integer % 1_0 ) + s
integer //= 1_0
return s
SCREAMING_SNAKE_CASE_ : Tuple = 0
while True:
SCREAMING_SNAKE_CASE_ : int = word + '#' + int_to_alphabetic(lowerCAmelCase__ )
if sword in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE_ : Tuple = sword
break
SCREAMING_SNAKE_CASE_ : Tuple = short_word
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word
return short_word
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = param_name.split('_' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [TrialShortNamer.shortname_for_word(lowerCAmelCase__ , lowerCAmelCase__ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
SCREAMING_SNAKE_CASE_ : Optional[int] = ['', '_']
for separator in separators:
SCREAMING_SNAKE_CASE_ : Optional[Any] = separator.join(lowerCAmelCase__ )
if shortname not in info["reverse_short_param"]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = shortname
SCREAMING_SNAKE_CASE_ : Optional[int] = param_name
return shortname
return param_name
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = TrialShortNamer.shortname_for_key(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = short_name
SCREAMING_SNAKE_CASE_ : Union[str, Any] = param_name
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
SCREAMING_SNAKE_CASE_ : int = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
SCREAMING_SNAKE_CASE_ : List[Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = info
@classmethod
def UpperCamelCase__ ( cls , lowerCAmelCase__ ):
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 1 if v else 0
SCREAMING_SNAKE_CASE_ : List[str] = '' if isinstance(lowerCAmelCase__ , (int, float) ) else '-'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'''{key}{sep}{v}'''
name.append(lowerCAmelCase__ )
return "_".join(lowerCAmelCase__ )
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
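
# A minimal usage sketch (hypothetical subclass; the full class also exposes a
# set_defaults helper that fills PREFIX and DEFAULTS for you):
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#         NAMING_INFO = None
#
#     name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#     # -> "run_lr0.0001": default values are dropped, remaining keys abbreviated
#     RunNamer.parse_repr(name)
#     # -> {"learning_rate": 0.0001, "batch_size": 32}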
| 162
|
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1, since every other residue mod 6
    # is divisible by 2 or 3; so it suffices to trial-divide by i and i + 2.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class PrimesTest(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # negative input trips the assert guard inside is_prime
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
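
    # Quick sanity check mirroring the cases above (hypothetical):
    #   [n for n in range(30) if is_prime(n)]
    #   -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]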
if __name__ == "__main__":
unittest.main()
| 162
| 1
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
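
    # e.g. get_scheduler_config(beta_schedule="scaled_linear") overrides a single
    # entry while keeping the remaining defaults (hypothetical usage note).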
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 7
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 265
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 367
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image thresholded from a grayscale image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
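
    # Tiny sanity check (hypothetical): dilating a single centre pixel with a
    # cross-shaped 3x3 kernel grows it into a cross:
    #   dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
    #            np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
    #   -> [[0, 1, 0], [1, 1, 1], [0, 1, 0]]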
| 161
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
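
# Typical invocation through the transformers CLI entry point, e.g.:
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased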
| 296
|
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to its integer value, handling
    subtractive pairs such as IV and CM."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral representation of `num`."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
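
# Worked example: parse_roman_numerals("MCMXC") returns
# 1000 - 100 + 1000 - 10 + 100 = 1990, and generate_roman_numerals(1990)
# round-trips back to the minimal form "MCMXC".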
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the number of characters saved by rewriting every numeral in the
    input file in its minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 61
| 0
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the fewest perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
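
# By Lagrange's four-square theorem the result is always <= 4 for positive
# input; e.g. minimum_squares_to_represent_a_number(12) == 3 (12 = 4 + 4 + 4).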
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97
|
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT dataset with the `datasets` package and save it as
    {split}.source / {split}.target files under save_dir."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
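
    # Example invocation via python-fire (assuming this script is saved as
    # download_wmt.py):
    #   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16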
| 97
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 59
| 1
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 179
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 179
| 1
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
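
# Example (hypothetical): shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0)
# yields [[0, 5, 6]] -- the decoder start id followed by the inputs shifted right.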
| 162
|
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Return a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given sequence and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the elements of the given linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 162
| 1
|
'''simple docstring'''
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b by binary doubling (Russian peasant multiplication).
    Function names are chosen here; the mangled source reused one identifier
    for both functions."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a and b modulo c, reducing the accumulator at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
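
# e.g. binary_multiply(13, 11) == 143 and binary_mod_multiply(13, 11, 7) == 3.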
| 364
|
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[Any] = 10
_UpperCAmelCase : int = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_UpperCAmelCase : List[str] = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(a_ ) ),
}, features=a_, )
return dataset
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=a_ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: List[str] ):
import tarfile
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
import lzma
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_UpperCAmelCase : List[str] = bytes(a_, "utf-8" )
with lzma.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: Tuple ):
import zipfile
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_UpperCAmelCase : int = bytes(a_, "utf-8" )
with zstd.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml"
_UpperCAmelCase : Tuple = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(a_, "w" ) as f:
f.write(a_ )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : str = datasets.Dataset.from_dict(a_ )
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=a_ )
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str, a_: str ):
import bza
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(a_, "rb" ) as f:
_UpperCAmelCase : Any = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ):
_UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) )
f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ):
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : str = {"data": DATA}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_312:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ):
import gzip
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(a_, "rb" ) as orig_file:
with gzip.open(a_, "wb" ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ):
import gzip
_UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(a_, "rb" ) as orig_file:
with gzip.open(a_, "wb" ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ):
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : List[str] = ["0", "1", "2", "3"]
_UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Dict = ["0", "1", "2", "3"]
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = ["0", "1", "2", "3"]
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename("unsupported.ext" ) )
f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(a_, "w", encoding="utf-8" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 17
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up GPU memory between the slow tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 drifts more, hence the looser tolerance
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
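
# Note on the test above: enable_sequential_cpu_offload() keeps submodules on
# the CPU and moves each one to the GPU only for its forward pass, which is
# what keeps peak allocation under the ~2.65 GB bound asserted here, at the
# cost of extra host/device transfers per step.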
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
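
# Usage sketch (assuming transformers is installed): _LazyModule defers the
# torch-backed imports declared above until an attribute is first accessed.
#
#   from transformers.models import clap  # cheap: nothing heavy imported yet
#   model_cls = clap.ClapModel            # attribute access triggers the real import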
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}

@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
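
# Quick illustration of the two helpers above:
#   >>> len(bytes_to_unicode())
#   256
#   >>> sorted(get_pairs(("l", "o", "w")))
#   [('l', 'o'), ('o', 'w')]
# bytes_to_unicode() builds a reversible byte -> printable-unicode table, and
# get_pairs() yields the adjacent symbol pairs the BPE loop considers merging.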

class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (i.e. earliest-learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : "Conversation" ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(__snake_case )
UpperCAmelCase_ = ''' '''.join(__snake_case )
UpperCAmelCase_ = self.encode(__snake_case )
if len(__snake_case ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
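
# Usage sketch (hypothetical; assumes the Hub files are reachable):
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello world")["input_ids"]
# build_inputs_with_special_tokens appends only the eos token, so `ids` ends
# with tokenizer.eos_token_id and has no leading bos token.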
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
        'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaForCausalLM',
        'RobertaForMaskedLM',
        'RobertaForMultipleChoice',
        'RobertaForQuestionAnswering',
        'RobertaForSequenceClassification',
        'RobertaForTokenClassification',
        'RobertaModel',
        'RobertaPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
        'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaForCausalLM',
        'TFRobertaForMaskedLM',
        'TFRobertaForMultipleChoice',
        'TFRobertaForQuestionAnswering',
        'TFRobertaForSequenceClassification',
        'TFRobertaForTokenClassification',
        'TFRobertaMainLayer',
        'TFRobertaModel',
        'TFRobertaPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
        'FlaxRobertaForCausalLM',
        'FlaxRobertaForMaskedLM',
        'FlaxRobertaForMultipleChoice',
        'FlaxRobertaForQuestionAnswering',
        'FlaxRobertaForSequenceClassification',
        'FlaxRobertaForTokenClassification',
        'FlaxRobertaModel',
        'FlaxRobertaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)