code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'naver-clova-ix/donut-base-finetuned-docvqa'
_UpperCAmelCase = (
'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
_UpperCAmelCase = 'document_qa'
_UpperCAmelCase = AutoProcessor
_UpperCAmelCase = VisionEncoderDecoderModel
_UpperCAmelCase = ['image', 'text']
_UpperCAmelCase = ['text']
def __init__( self : Union[str, Any] , *__snake_case : Tuple , **__snake_case : int ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*__snake_case , **__snake_case )
def snake_case ( self : Dict , __snake_case : "Image" , __snake_case : str ):
lowerCamelCase :str = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowerCamelCase :List[str] = task_prompt.replace('''{user_input}''' , __snake_case )
lowerCamelCase :Optional[int] = self.pre_processor.tokenizer(
__snake_case , add_special_tokens=__snake_case , return_tensors='''pt''' ).input_ids
lowerCamelCase :str = self.pre_processor(__snake_case , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def snake_case ( self : Optional[int] , __snake_case : Tuple ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__snake_case , ).sequences
def snake_case ( self : Dict , __snake_case : Optional[int] ):
lowerCamelCase :Optional[int] = self.pre_processor.batch_decode(__snake_case )[0]
lowerCamelCase :Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
lowerCamelCase :Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
lowerCamelCase :Union[str, Any] = re.sub(R'''<.*?>''' , '''''' , __snake_case , count=1 ).strip() # remove first task start token
lowerCamelCase :Tuple = self.pre_processor.tokenajson(__snake_case )
return sequence["answer"]
| 49
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'data2vec-text'
def __init__( self : str , __snake_case : Optional[Any]=30522 , __snake_case : str=768 , __snake_case : Tuple=12 , __snake_case : List[str]=12 , __snake_case : Optional[Any]=3072 , __snake_case : Any="gelu" , __snake_case : str=0.1 , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Tuple=2 , __snake_case : int=0.0_2 , __snake_case : List[Any]=1e-1_2 , __snake_case : str=1 , __snake_case : Tuple=0 , __snake_case : int=2 , __snake_case : Tuple="absolute" , __snake_case : List[Any]=True , __snake_case : int=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Tuple = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Union[str, Any] = hidden_act
lowerCamelCase :Optional[Any] = intermediate_size
lowerCamelCase :str = hidden_dropout_prob
lowerCamelCase :Tuple = attention_probs_dropout_prob
lowerCamelCase :Any = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Any = initializer_range
lowerCamelCase :Optional[Any] = layer_norm_eps
lowerCamelCase :List[str] = position_embedding_type
lowerCamelCase :Tuple = use_cache
lowerCamelCase :List[Any] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Tuple ):
if self.task == "multiple-choice":
lowerCamelCase :Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
def _lowerCamelCase ( a_ : float):
return 10 - x * x
def _lowerCamelCase ( a_ : float , a_ : float):
# Bolzano theory in order to find if there is a root between a and b
if equation(a_) * equation(a_) >= 0:
raise ValueError('''Wrong space!''')
lowerCamelCase :List[str] = a
while (b - a) >= 0.01:
# Find middle point
lowerCamelCase :Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(a_) == 0.0:
break
# Decide the side to repeat the steps
if equation(a_) * equation(a_) < 0:
lowerCamelCase :str = c
else:
lowerCamelCase :Tuple = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 49
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 1
|
from __future__ import annotations
A__ = 10
def _lowerCamelCase ( a_ : list[int]):
lowerCamelCase :Any = 1
lowerCamelCase :List[str] = max(a_)
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase :list[list] = [[] for _ in range(a_)]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase :Tuple = int((i / placement) % RADIX)
buckets[tmp].append(a_)
# put each buckets' contents into list_of_ints
lowerCamelCase :Optional[int] = 0
for b in range(a_):
for i in buckets[b]:
lowerCamelCase :List[Any] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""PoolFormerFeatureExtractor"""]
A__ = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 49
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 1
|
def _lowerCamelCase ( a_ : int = 50_00_00_00):
lowerCamelCase :int = set()
lowerCamelCase :Union[str, Any] = int((limit - 24) ** (1 / 2))
lowerCamelCase :Optional[Any] = set(range(3 , prime_square_limit + 1 , 2))
primes.add(2)
for p in range(3 , prime_square_limit + 1 , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , a_)))
for primea in primes:
lowerCamelCase :Any = primea * primea
for primea in primes:
lowerCamelCase :Union[str, Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCamelCase :List[Any] = primea * primea * primea * primea
lowerCamelCase :Dict = square + cube + tetr
if total >= limit:
break
ret.add(a_)
return len(a_)
if __name__ == "__main__":
print(F'{solution() = }')
| 49
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 1
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : str , a_ : Any , a_ : Union[str, Any] , a_ : int):
# Load configuration defined in the metadata file
with open(a_) as metadata_file:
lowerCamelCase :Union[str, Any] = json.load(a_)
lowerCamelCase :Union[str, Any] = LukeConfig(use_entity_aware_attention=a_ , **metadata['''model_config'''])
# Load in the weights from the checkpoint_path
lowerCamelCase :List[str] = torch.load(a_ , map_location='''cpu''')
# Load the entity vocab file
lowerCamelCase :List[str] = load_entity_vocab(a_)
lowerCamelCase :Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''])
# Add special tokens to the token vocabulary for downstream tasks
lowerCamelCase :int = AddedToken('''<ent>''' , lstrip=a_ , rstrip=a_)
lowerCamelCase :Optional[Any] = AddedToken('''<ent2>''' , lstrip=a_ , rstrip=a_)
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]})
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}")
tokenizer.save_pretrained(a_)
with open(os.path.join(a_ , LukeTokenizer.vocab_files_names['''entity_vocab_file''']) , '''w''') as f:
json.dump(a_ , a_)
lowerCamelCase :str = LukeTokenizer.from_pretrained(a_)
# Initialize the embeddings of the special tokens
lowerCamelCase :Union[str, Any] = state_dict['''embeddings.word_embeddings.weight''']
lowerCamelCase :Any = word_emb[tokenizer.convert_tokens_to_ids(['''@'''])[0]].unsqueeze(0)
lowerCamelCase :List[Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''])[0]].unsqueeze(0)
lowerCamelCase :Dict = torch.cat([word_emb, ent_emb, enta_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
lowerCamelCase :Tuple = F"encoder.layer.{layer_index}.attention.self."
lowerCamelCase :List[Any] = state_dict[prefix + matrix_name]
lowerCamelCase :Any = state_dict[prefix + matrix_name]
lowerCamelCase :str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCamelCase :Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
lowerCamelCase :List[Any] = entity_emb[entity_vocab['''[MASK]''']]
lowerCamelCase :int = LukeModel(config=a_).eval()
lowerCamelCase , lowerCamelCase :List[Any] = model.load_state_dict(a_ , strict=a_)
if not (len(a_) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"Missing keys {', '.join(a_)}. Expected only missing embeddings.position_ids")
if not (all(key.startswith('''entity_predictions''') or key.startswith('''lm_head''') for key in unexpected_keys)):
raise ValueError(
'''Unexpected keys'''
F" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}")
# Check outputs
lowerCamelCase :Tuple = LukeTokenizer.from_pretrained(a_ , task='''entity_classification''')
lowerCamelCase :int = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
lowerCamelCase :int = (39, 42)
lowerCamelCase :Optional[Any] = tokenizer(a_ , entity_spans=[span] , add_prefix_space=a_ , return_tensors='''pt''')
lowerCamelCase :List[str] = model(**a_)
# Verify word hidden states
if model_size == "large":
lowerCamelCase :List[Any] = torch.Size((1, 42, 10_24))
lowerCamelCase :Union[str, Any] = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]])
else: # base
lowerCamelCase :Optional[Any] = torch.Size((1, 42, 7_68))
lowerCamelCase :List[Any] = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowerCamelCase :List[Any] = torch.Size((1, 1, 10_24))
lowerCamelCase :int = torch.tensor([[0.0_466, -0.0_106, -0.0_179]])
else: # base
lowerCamelCase :Optional[int] = torch.Size((1, 1, 7_68))
lowerCamelCase :List[str] = torch.tensor([[0.1_457, 0.1_044, 0.0_174]])
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}")
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a_ , atol=1e-4):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(a_))
model.save_pretrained(a_)
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :Optional[Any] = {}
with open(a_ , '''r''' , encoding='''utf-8''') as f:
for index, line in enumerate(a_):
lowerCamelCase , lowerCamelCase :Tuple = line.rstrip().split('''\t''')
lowerCamelCase :str = index
return entity_vocab
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
A__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 49
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
| 49
| 1
|
from __future__ import annotations
from collections.abc import MutableSequence
class _lowerCAmelCase :
def __init__( self : List[str] , __snake_case : int , __snake_case : MutableSequence[float] ):
if len(__snake_case ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowerCamelCase :list[float] = list(__snake_case )
lowerCamelCase :Tuple = degree
def __add__( self : Dict , __snake_case : Polynomial ):
if self.degree > polynomial_a.degree:
lowerCamelCase :Optional[Any] = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , __snake_case )
else:
lowerCamelCase :List[str] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , __snake_case )
def __sub__( self : str , __snake_case : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : int ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : List[str] , __snake_case : Polynomial ):
lowerCamelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , __snake_case )
def snake_case ( self : Tuple , __snake_case : int | float ):
lowerCamelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Tuple ):
lowerCamelCase :Tuple = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__snake_case )
return polynomial
def __repr__( self : Dict ):
return self.__str__()
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
lowerCamelCase :List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , __snake_case )
def snake_case ( self : Optional[int] , __snake_case : int | float = 0 ):
lowerCamelCase :list[float] = [0] * (self.degree + 2)
lowerCamelCase :List[str] = constant
for i in range(self.degree + 1 ):
lowerCamelCase :Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , __snake_case )
def __eq__( self : Union[str, Any] , __snake_case : object ):
if not isinstance(__snake_case , __snake_case ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] , __snake_case : object ):
return not self.__eq__(__snake_case )
| 49
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 1
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
def _lowerCamelCase ( a_ : List[str]=2_81_23):
lowerCamelCase :Tuple = [1] * (limit + 1)
for i in range(2 , int(limit**0.5) + 1):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1):
sum_divs[k * i] += k + i
lowerCamelCase :Any = set()
lowerCamelCase :Union[str, Any] = 0
for n in range(1 , limit + 1):
if sum_divs[n] > n:
abundants.add(a_)
if not any((n - a in abundants) for a in abundants):
res += n
return res
if __name__ == "__main__":
print(solution())
| 49
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 1
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
A__ = parser.parse_args()
A__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 49
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 1
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def snake_case ( self : Any ):
lowerCamelCase :List[str] = tempfile.mkdtemp()
lowerCamelCase :List[str] = 8
# DPR tok
lowerCamelCase :str = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase :Dict = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase :List[str] = os.path.join(__snake_case , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCamelCase :Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Any = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Tuple = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
lowerCamelCase :Dict = os.path.join(__snake_case , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Optional[int] = os.path.join(__snake_case , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : List[Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def snake_case ( self : Optional[Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def snake_case ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def snake_case ( self : Optional[int] ):
lowerCamelCase :Tuple = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
lowerCamelCase :Union[str, Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCamelCase :Union[str, Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__snake_case )
rag_tokenizer.save_pretrained(__snake_case )
lowerCamelCase :int = RagTokenizer.from_pretrained(__snake_case , config=__snake_case )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __snake_case )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __snake_case )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Optional[int] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
lowerCamelCase :Any = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCamelCase :Dict = tokenizer(__snake_case )
self.assertIsNotNone(__snake_case )
@slow
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
lowerCamelCase :List[Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
lowerCamelCase :str = tokenizer(__snake_case )
self.assertIsNotNone(__snake_case )
| 49
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 1
|
# Imports
import numpy as np
class _lowerCAmelCase :
def __init__( self : List[Any] , __snake_case : List[Any]=None , __snake_case : Optional[int]=None , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Union[str, Any]=None ):
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any]=None , __snake_case : str=None , __snake_case : Optional[int]=None , __snake_case : Any=None , __snake_case : Optional[Any]=None ):
if red is not None:
lowerCamelCase :Union[str, Any] = red
if green is not None:
lowerCamelCase :Tuple = green
if blue is not None:
lowerCamelCase :Optional[Any] = blue
if red_edge is not None:
lowerCamelCase :Any = red_edge
if nir is not None:
lowerCamelCase :List[Any] = nir
return True
def snake_case ( self : Dict , __snake_case : Dict="" , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : List[str]=None , __snake_case : Any=None , __snake_case : Any=None ):
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
lowerCamelCase :List[Any] = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def snake_case ( self : List[Any] ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def snake_case ( self : List[str] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def snake_case ( self : Optional[int] ):
return self.nir * (self.red / (self.green**2))
def snake_case ( self : List[Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def snake_case ( self : Optional[Any] ):
return (self.nir - self.red) / (self.nir + self.red)
def snake_case ( self : str ):
return (self.nir - self.blue) / (self.nir + self.blue)
def snake_case ( self : List[Any] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def snake_case ( self : List[str] ):
return (self.nir - self.green) / (self.nir + self.green)
def snake_case ( self : str ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def snake_case ( self : Optional[int] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def snake_case ( self : List[str] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def snake_case ( self : List[Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def snake_case ( self : Optional[Any] , __snake_case : str=0.0_8 , __snake_case : Optional[int]=1.2_2 , __snake_case : Dict=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def snake_case ( self : List[str] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def snake_case ( self : Dict ):
return (self.nir / self.green) - 1
def snake_case ( self : str ):
return (self.nir / self.redEdge) - 1
def snake_case ( self : Any ):
return (self.red - self.blue) / self.red
def snake_case ( self : Any ):
lowerCamelCase :int = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def snake_case ( self : Any ):
return self.nir - self.green
def snake_case ( self : Optional[Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def snake_case ( self : Optional[int] , __snake_case : str=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def snake_case ( self : int , __snake_case : Optional[Any]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def snake_case ( self : Tuple ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def snake_case ( self : Tuple , __snake_case : Dict=None , __snake_case : Dict=None ):
return (self.nir - b) / (a * self.red)
def snake_case ( self : Dict ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def snake_case ( self : List[Any] ):
return (self.red + self.green + self.blue) / 3_0.5
def snake_case ( self : int ):
return self.nir / self.red
def snake_case ( self : List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def snake_case ( self : Any ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def snake_case ( self : Dict ):
return self.green / (self.nir + self.red + self.green)
def snake_case ( self : Optional[int] ):
return self.nir / (self.nir + self.red + self.green)
def snake_case ( self : int ):
return self.red / (self.nir + self.red + self.green)
def snake_case ( self : Any ):
return (self.green - self.red) / (self.green + self.red)
def snake_case ( self : str ):
return (self.red - self.green) / (self.red + self.green)
def snake_case ( self : int ):
lowerCamelCase :Optional[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase :Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def snake_case ( self : Optional[Any] ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def snake_case ( self : int ):
return self.nir / self.red
def snake_case ( self : Any ):
return (self.ndvi() + 0.5) ** (1 / 2)
def snake_case ( self : int ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 49
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 1
|
def _lowerCamelCase ( a_ : str):
lowerCamelCase :Optional[int] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''')
lowerCamelCase :str = hex_num[0] == '''-'''
if is_negative:
lowerCamelCase :int = hex_num[1:]
try:
lowerCamelCase :int = int(a_ , 16)
except ValueError:
raise ValueError('''Invalid value was passed to the function''')
lowerCamelCase :List[Any] = ''''''
while int_num > 0:
lowerCamelCase :Optional[Any] = str(int_num % 2) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 1
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
A__ = TypeVar("""KT""")
A__ = TypeVar("""VT""")
class _lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : List[str] , __snake_case : KT | str = "root" , __snake_case : VT | None = None ):
lowerCamelCase :int = key
lowerCamelCase :List[str] = value
lowerCamelCase :list[Node[KT, VT]] = []
def __repr__( self : str ):
return F"Node({self.key}: {self.value})"
@property
def snake_case ( self : int ):
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : List[str] , __snake_case : float = 0.5 , __snake_case : int = 16 ):
lowerCamelCase :Node[KT, VT] = Node[KT, VT]()
lowerCamelCase :List[str] = 0
lowerCamelCase :Tuple = p
lowerCamelCase :List[str] = max_level
def __str__( self : Tuple ):
lowerCamelCase :Optional[Any] = list(self )
if len(__snake_case ) == 0:
return F"SkipList(level={self.level})"
lowerCamelCase :Dict = max((len(str(__snake_case ) ) for item in items) , default=4 )
lowerCamelCase :Union[str, Any] = max(__snake_case , 4 ) + 4
lowerCamelCase :Optional[Any] = self.head
lowerCamelCase :Dict = []
lowerCamelCase :Optional[Any] = node.forward.copy()
lines.append(F"[{node.key}]".ljust(__snake_case , '''-''' ) + '''* ''' * len(__snake_case ) )
lines.append(''' ''' * label_size + '''| ''' * len(__snake_case ) )
while len(node.forward ) != 0:
lowerCamelCase :int = node.forward[0]
lines.append(
F"[{node.key}]".ljust(__snake_case , '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(__snake_case ) )
lowerCamelCase :Tuple = node.forward
lines.append('''None'''.ljust(__snake_case ) + '''* ''' * len(__snake_case ) )
return F"SkipList(level={self.level})\n" + "\n".join(__snake_case )
def __iter__( self : Optional[int] ):
lowerCamelCase :Tuple = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
lowerCamelCase :int = node.forward[0]
def snake_case ( self : List[Any] ):
lowerCamelCase :int = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def snake_case ( self : int , __snake_case : Any ):
lowerCamelCase :List[Any] = []
lowerCamelCase :Union[str, Any] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
lowerCamelCase :Tuple = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__snake_case )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def snake_case ( self : int , __snake_case : KT ):
lowerCamelCase , lowerCamelCase :Tuple = self._locate_node(__snake_case )
if node is not None:
for i, update_node in enumerate(__snake_case ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
lowerCamelCase :str = node.forward[i]
else:
lowerCamelCase :Union[str, Any] = update_node.forward[:i]
def snake_case ( self : Dict , __snake_case : KT , __snake_case : VT ):
lowerCamelCase , lowerCamelCase :Any = self._locate_node(__snake_case )
if node is not None:
lowerCamelCase :Union[str, Any] = value
else:
lowerCamelCase :Union[str, Any] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __snake_case ):
update_vector.append(self.head )
lowerCamelCase :Union[str, Any] = level
lowerCamelCase :Any = Node(__snake_case , __snake_case )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__snake_case )
else:
lowerCamelCase :Optional[int] = new_node
def snake_case ( self : Optional[Any] , __snake_case : VT ):
lowerCamelCase , lowerCamelCase :List[str] = self._locate_node(__snake_case )
if node is not None:
return node.value
return None
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = SkipList()
skip_list.insert('''Key1''' , 3)
skip_list.insert('''Key2''' , 12)
skip_list.insert('''Key3''' , 41)
skip_list.insert('''Key4''' , -19)
lowerCamelCase :Any = skip_list.head
lowerCamelCase :List[Any] = {}
while node.level != 0:
lowerCamelCase :List[Any] = node.forward[0]
lowerCamelCase :Optional[int] = node.value
assert len(a_) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def _lowerCamelCase ( ):
lowerCamelCase :Optional[int] = SkipList()
skip_list.insert('''Key1''' , 10)
skip_list.insert('''Key1''' , 12)
skip_list.insert('''Key5''' , 7)
skip_list.insert('''Key7''' , 10)
skip_list.insert('''Key10''' , 5)
skip_list.insert('''Key7''' , 7)
skip_list.insert('''Key5''' , 5)
skip_list.insert('''Key10''' , 10)
lowerCamelCase :Tuple = skip_list.head
lowerCamelCase :int = {}
while node.level != 0:
lowerCamelCase :Tuple = node.forward[0]
lowerCamelCase :Dict = node.value
if len(a_) != 4:
print()
assert len(a_) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def _lowerCamelCase ( ):
lowerCamelCase :List[str] = SkipList()
assert skip_list.find('''Some key''') is None
def _lowerCamelCase ( ):
lowerCamelCase :Optional[int] = SkipList()
skip_list.insert('''Key2''' , 20)
assert skip_list.find('''Key2''') == 20
skip_list.insert('''Some Key''' , 10)
skip_list.insert('''Key2''' , 8)
skip_list.insert('''V''' , 13)
assert skip_list.find('''Y''') is None
assert skip_list.find('''Key2''') == 8
assert skip_list.find('''Some Key''') == 10
assert skip_list.find('''V''') == 13
def _lowerCamelCase ( ):
lowerCamelCase :Union[str, Any] = SkipList()
skip_list.delete('''Some key''')
assert len(skip_list.head.forward) == 0
def _lowerCamelCase ( ):
lowerCamelCase :str = SkipList()
skip_list.insert('''Key1''' , 12)
skip_list.insert('''V''' , 13)
skip_list.insert('''X''' , 14)
skip_list.insert('''Key2''' , 15)
skip_list.delete('''V''')
skip_list.delete('''Key2''')
assert skip_list.find('''V''') is None
assert skip_list.find('''Key2''') is None
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = SkipList()
skip_list.insert('''Key1''' , 12)
skip_list.insert('''V''' , 13)
skip_list.insert('''X''' , 14)
skip_list.insert('''Key2''' , 15)
skip_list.delete('''V''')
assert skip_list.find('''V''') is None
assert skip_list.find('''X''') == 14
assert skip_list.find('''Key1''') == 12
assert skip_list.find('''Key2''') == 15
skip_list.delete('''X''')
assert skip_list.find('''V''') is None
assert skip_list.find('''X''') is None
assert skip_list.find('''Key1''') == 12
assert skip_list.find('''Key2''') == 15
skip_list.delete('''Key1''')
assert skip_list.find('''V''') is None
assert skip_list.find('''X''') is None
assert skip_list.find('''Key1''') is None
assert skip_list.find('''Key2''') == 15
skip_list.delete('''Key2''')
assert skip_list.find('''V''') is None
assert skip_list.find('''X''') is None
assert skip_list.find('''Key1''') is None
assert skip_list.find('''Key2''') is None
def _lowerCamelCase ( ):
lowerCamelCase :Union[str, Any] = SkipList()
skip_list.insert('''Key1''' , 12)
skip_list.insert('''V''' , 13)
skip_list.insert('''X''' , 1_42)
skip_list.insert('''Key2''' , 15)
skip_list.delete('''X''')
def traverse_keys(a_ : Dict):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(a_)
assert len(set(traverse_keys(skip_list.head))) == 4
def _lowerCamelCase ( ):
def is_sorted(a_ : List[str]):
return all(next_item >= item for item, next_item in zip(a_ , lst[1:]))
lowerCamelCase :str = SkipList()
for i in range(10):
skip_list.insert(a_ , a_)
assert is_sorted(list(a_))
skip_list.delete(5)
skip_list.delete(8)
skip_list.delete(2)
assert is_sorted(list(a_))
skip_list.insert(-12 , -12)
skip_list.insert(77 , 77)
assert is_sorted(list(a_))
def _lowerCamelCase ( ):
for _ in range(1_00):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _lowerCamelCase ( ):
lowerCamelCase :Any = SkipList()
skip_list.insert(2 , '''2''')
skip_list.insert(4 , '''4''')
skip_list.insert(6 , '''4''')
skip_list.insert(4 , '''5''')
skip_list.insert(8 , '''4''')
skip_list.insert(9 , '''4''')
skip_list.delete(4)
print(a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 49
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 1
|
def _lowerCamelCase ( a_ : str = "The quick brown fox jumps over the lazy dog" , ):
lowerCamelCase :Dict = set()
# Replace all the whitespace in our sentence
lowerCamelCase :int = input_str.replace(''' ''' , '''''')
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(a_) == 26
def _lowerCamelCase ( a_ : str = "The quick brown fox jumps over the lazy dog" , ):
lowerCamelCase :Any = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase :List[Any] = True
elif char.isupper():
lowerCamelCase :List[Any] = True
return all(a_)
def _lowerCamelCase ( a_ : str = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def _lowerCamelCase ( ):
from timeit import timeit
lowerCamelCase :Union[str, Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=a_))
print(timeit('''is_pangram_faster()''' , setup=a_))
print(timeit('''is_pangram_fastest()''' , setup=a_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 49
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , __snake_case : Distribution , __snake_case : Any=None , __snake_case : Any=None , __snake_case : List[Any]=0 ):
lowerCamelCase :Dict = 1.0 if scale is None else scale
lowerCamelCase :Any = 0.0 if loc is None else loc
super().__init__(__snake_case , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__snake_case )] )
@property
def snake_case ( self : Optional[Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def snake_case ( self : Tuple ):
return self.base_dist.variance * self.scale**2
@property
def snake_case ( self : Dict ):
return self.variance.sqrt()
class _lowerCAmelCase ( nn.Module ):
def __init__( self : str , __snake_case : int , __snake_case : Dict[str, int] , __snake_case : Callable[..., Tuple[torch.Tensor]] , **__snake_case : str ):
super().__init__(**__snake_case )
lowerCamelCase :List[Any] = args_dim
lowerCamelCase :Any = nn.ModuleList([nn.Linear(__snake_case , __snake_case ) for dim in args_dim.values()] )
lowerCamelCase :Optional[int] = domain_map
def snake_case ( self : int , __snake_case : torch.Tensor ):
lowerCamelCase :int = [proj(__snake_case ) for proj in self.proj]
return self.domain_map(*__snake_case )
class _lowerCAmelCase ( nn.Module ):
def __init__( self : str , __snake_case : Optional[int] ):
super().__init__()
lowerCamelCase :Union[str, Any] = function
def snake_case ( self : List[str] , __snake_case : List[str] , *__snake_case : str ):
return self.function(__snake_case , *__snake_case )
class _lowerCAmelCase :
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
def __init__( self : Tuple , __snake_case : int = 1 ):
lowerCamelCase :Any = dim
lowerCamelCase :Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def snake_case ( self : Optional[int] , __snake_case : List[str] ):
if self.dim == 1:
return self.distribution_class(*__snake_case )
else:
return Independent(self.distribution_class(*__snake_case ) , 1 )
def snake_case ( self : str , __snake_case : Tuple , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , ):
lowerCamelCase :Dict = self._base_distribution(__snake_case )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__snake_case , loc=__snake_case , scale=__snake_case , event_dim=self.event_dim )
@property
def snake_case ( self : str ):
return () if self.dim == 1 else (self.dim,)
@property
def snake_case ( self : Any ):
return len(self.event_shape )
@property
def snake_case ( self : Optional[Any] ):
return 0.0
def snake_case ( self : Tuple , __snake_case : int ):
return ParameterProjection(
in_features=__snake_case , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def snake_case ( self : Optional[int] , *__snake_case : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def snake_case ( __snake_case : torch.Tensor ):
return (x + torch.sqrt(torch.square(__snake_case ) + 4.0 )) / 2.0
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_UpperCAmelCase = StudentT
@classmethod
def snake_case ( cls : Any , __snake_case : torch.Tensor , __snake_case : torch.Tensor , __snake_case : torch.Tensor ):
lowerCamelCase :List[str] = cls.squareplus(__snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
lowerCamelCase :Dict = 2.0 + cls.squareplus(__snake_case )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {"loc": 1, "scale": 1}
_UpperCAmelCase = Normal
@classmethod
def snake_case ( cls : Optional[int] , __snake_case : torch.Tensor , __snake_case : torch.Tensor ):
lowerCamelCase :Union[str, Any] = cls.squareplus(__snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {"total_count": 1, "logits": 1}
_UpperCAmelCase = NegativeBinomial
@classmethod
def snake_case ( cls : List[Any] , __snake_case : torch.Tensor , __snake_case : torch.Tensor ):
lowerCamelCase :Dict = cls.squareplus(__snake_case )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def snake_case ( self : Tuple , __snake_case : Optional[int] ):
lowerCamelCase , lowerCamelCase :Tuple = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__snake_case , logits=__snake_case )
else:
return Independent(self.distribution_class(total_count=__snake_case , logits=__snake_case ) , 1 )
def snake_case ( self : int , __snake_case : str , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None ):
lowerCamelCase , lowerCamelCase :int = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 49
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
| 49
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'pegasus'
_UpperCAmelCase = ['past_key_values']
_UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[Any] , __snake_case : List[Any]=50265 , __snake_case : Any=1024 , __snake_case : Tuple=12 , __snake_case : Union[str, Any]=4096 , __snake_case : List[str]=16 , __snake_case : Union[str, Any]=12 , __snake_case : List[Any]=4096 , __snake_case : Any=16 , __snake_case : Optional[int]=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=True , __snake_case : Union[str, Any]=True , __snake_case : Tuple="gelu" , __snake_case : Union[str, Any]=1024 , __snake_case : List[Any]=0.1 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=0.0 , __snake_case : int=0.0_2 , __snake_case : Optional[int]=0 , __snake_case : Union[str, Any]=False , __snake_case : Any=0 , __snake_case : Any=1 , __snake_case : List[Any]=1 , **__snake_case : Tuple , ):
lowerCamelCase :Any = vocab_size
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :Optional[int] = d_model
lowerCamelCase :int = encoder_ffn_dim
lowerCamelCase :Union[str, Any] = encoder_layers
lowerCamelCase :Tuple = encoder_attention_heads
lowerCamelCase :List[str] = decoder_ffn_dim
lowerCamelCase :Dict = decoder_layers
lowerCamelCase :Tuple = decoder_attention_heads
lowerCamelCase :List[Any] = dropout
lowerCamelCase :List[Any] = attention_dropout
lowerCamelCase :int = activation_dropout
lowerCamelCase :str = activation_function
lowerCamelCase :str = init_std
lowerCamelCase :Any = encoder_layerdrop
lowerCamelCase :Dict = decoder_layerdrop
lowerCamelCase :Union[str, Any] = use_cache
lowerCamelCase :int = encoder_layers
lowerCamelCase :Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
@property
def snake_case ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
| 49
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'camembert'
def __init__( self : int , __snake_case : Union[str, Any]=30522 , __snake_case : Optional[int]=768 , __snake_case : List[Any]=12 , __snake_case : Tuple=12 , __snake_case : Union[str, Any]=3072 , __snake_case : Any="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : Any=0.1 , __snake_case : int=512 , __snake_case : Any=2 , __snake_case : List[str]=0.0_2 , __snake_case : List[str]=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : int=0 , __snake_case : Union[str, Any]=2 , __snake_case : Union[str, Any]="absolute" , __snake_case : Any=True , __snake_case : Optional[int]=None , **__snake_case : Any , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[Any] = vocab_size
lowerCamelCase :List[str] = hidden_size
lowerCamelCase :str = num_hidden_layers
lowerCamelCase :str = num_attention_heads
lowerCamelCase :Optional[Any] = hidden_act
lowerCamelCase :Optional[Any] = intermediate_size
lowerCamelCase :Tuple = hidden_dropout_prob
lowerCamelCase :Dict = attention_probs_dropout_prob
lowerCamelCase :Union[str, Any] = max_position_embeddings
lowerCamelCase :Optional[int] = type_vocab_size
lowerCamelCase :Any = initializer_range
lowerCamelCase :List[Any] = layer_norm_eps
lowerCamelCase :Tuple = position_embedding_type
lowerCamelCase :Tuple = use_cache
lowerCamelCase :Dict = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Dict ):
if self.task == "multiple-choice":
lowerCamelCase :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 1
|
def _lowerCamelCase ( a_ : int = 60_08_51_47_51_43):
try:
lowerCamelCase :Optional[Any] = int(a_)
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''')
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''')
lowerCamelCase :List[str] = 2
lowerCamelCase :Tuple = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowerCamelCase :Optional[Any] = i
while n % i == 0:
lowerCamelCase :Union[str, Any] = n // i
i += 1
return int(a_)
if __name__ == "__main__":
print(F'{solution() = }')
| 49
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A__ = logging.get_logger(__name__)
class _lowerCAmelCase :
_UpperCAmelCase = 42
_UpperCAmelCase = None
@staticmethod
def snake_case ( ):
raise NotImplementedError
def snake_case ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str , **__snake_case : str ):
raise NotImplementedError
def snake_case ( self : Union[str, Any] , __snake_case : List[str] ):
raise NotImplementedError
def snake_case ( self : Optional[Any] ):
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def snake_case ( cls : Optional[Any] ):
return F"`pip install {cls.pip_package or cls.name}`"
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'optuna'
@staticmethod
def snake_case ( ):
return is_optuna_available()
def snake_case ( self : Optional[int] , __snake_case : Any , __snake_case : int , __snake_case : str , **__snake_case : Dict ):
return run_hp_search_optuna(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : List[str] , __snake_case : List[str] ):
return default_hp_space_optuna(__snake_case )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'ray'
_UpperCAmelCase = '\'ray[tune]\''
@staticmethod
def snake_case ( ):
return is_ray_available()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : int , __snake_case : str , **__snake_case : Optional[int] ):
return run_hp_search_ray(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : Tuple , __snake_case : Any ):
return default_hp_space_ray(__snake_case )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'sigopt'
@staticmethod
def snake_case ( ):
return is_sigopt_available()
def snake_case ( self : Any , __snake_case : str , __snake_case : int , __snake_case : str , **__snake_case : Optional[int] ):
return run_hp_search_sigopt(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : Union[str, Any] , __snake_case : List[Any] ):
return default_hp_space_sigopt(__snake_case )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'wandb'
@staticmethod
def snake_case ( ):
return is_wandb_available()
def snake_case ( self : str , __snake_case : Any , __snake_case : int , __snake_case : str , **__snake_case : List[Any] ):
return run_hp_search_wandb(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : List[Any] ):
return default_hp_space_wandb(__snake_case )
A__ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _lowerCamelCase ( ):
lowerCamelCase :Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(a_) > 0:
lowerCamelCase :Optional[int] = available_backends[0].name
if len(a_) > 1:
logger.info(
F"{len(a_)} hyperparameter search backends available. Using {name} as the default.")
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
| 49
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 1
|
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( ):
lowerCamelCase :int = 10
lowerCamelCase :Tuple = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''')),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''])),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string'''),
'''answer_start''': datasets.Value('''int32'''),
}),
'''id''': datasets.Value('''int64'''),
})
lowerCamelCase :Tuple = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(a_)),
} , features=a_ , )
return dataset
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any] , a_ : List[str]):
lowerCamelCase :Optional[Any] = str(tmp_path_factory.mktemp('''data''') / '''file.arrow''')
dataset.map(cache_file_name=a_)
return filename
# FILE_CONTENT + files
A__ = """\
Text data.
Second line of data."""
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Dict):
lowerCamelCase :int = tmp_path_factory.mktemp('''data''') / '''file.txt'''
lowerCamelCase :Optional[int] = FILE_CONTENT
with open(a_ , '''w''') as f:
f.write(a_)
return filename
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Optional[int]):
import bza
lowerCamelCase :List[Any] = tmp_path_factory.mktemp('''data''') / '''file.txt.bz2'''
lowerCamelCase :Any = bytes(a_ , '''utf-8''')
with bza.open(a_ , '''wb''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Any):
import gzip
lowerCamelCase :Any = str(tmp_path_factory.mktemp('''data''') / '''file.txt.gz''')
lowerCamelCase :str = bytes(a_ , '''utf-8''')
with gzip.open(a_ , '''wb''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Tuple):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowerCamelCase :Union[str, Any] = tmp_path_factory.mktemp('''data''') / '''file.txt.lz4'''
lowerCamelCase :List[Any] = bytes(a_ , '''utf-8''')
with lza.frame.open(a_ , '''wb''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : str):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowerCamelCase :Dict = tmp_path_factory.mktemp('''data''') / '''file.txt.7z'''
with pyazr.SevenZipFile(a_ , '''w''') as archive:
archive.write(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : int , a_ : Any):
import tarfile
lowerCamelCase :Tuple = tmp_path_factory.mktemp('''data''') / '''file.txt.tar'''
with tarfile.TarFile(a_ , '''w''') as f:
f.add(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : int):
import lzma
lowerCamelCase :int = tmp_path_factory.mktemp('''data''') / '''file.txt.xz'''
lowerCamelCase :Tuple = bytes(a_ , '''utf-8''')
with lzma.open(a_ , '''wb''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any] , a_ : int):
import zipfile
lowerCamelCase :Dict = tmp_path_factory.mktemp('''data''') / '''file.txt.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Dict):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCamelCase :str = tmp_path_factory.mktemp('''data''') / '''file.txt.zst'''
lowerCamelCase :Tuple = bytes(a_ , '''utf-8''')
with zstd.open(a_ , '''wb''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :str = tmp_path_factory.mktemp('''data''') / '''file.xml'''
lowerCamelCase :int = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''')
with open(a_ , '''w''') as f:
f.write(a_)
return filename
A__ = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
A__ = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
A__ = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
A__ = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
A__ = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any]):
lowerCamelCase :List[str] = datasets.Dataset.from_dict(a_)
lowerCamelCase :Tuple = str(tmp_path_factory.mktemp('''data''') / '''dataset.arrow''')
dataset.map(cache_file_name=a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : str):
lowerCamelCase :int = str(tmp_path_factory.mktemp('''data''') / '''dataset.sqlite''')
with contextlib.closing(sqlitea.connect(a_)) as con:
lowerCamelCase :Union[str, Any] = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''')
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Optional[int]):
lowerCamelCase :Any = str(tmp_path_factory.mktemp('''data''') / '''dataset.csv''')
with open(a_ , '''w''' , newline='''''') as f:
lowerCamelCase :Optional[int] = csv.DictWriter(a_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''])
writer.writeheader()
for item in DATA:
writer.writerow(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :Optional[Any] = str(tmp_path_factory.mktemp('''data''') / '''dataset2.csv''')
with open(a_ , '''w''' , newline='''''') as f:
lowerCamelCase :Dict = csv.DictWriter(a_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''])
writer.writeheader()
for item in DATA:
writer.writerow(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : str , a_ : Optional[int]):
import bza
lowerCamelCase :Optional[Any] = tmp_path_factory.mktemp('''data''') / '''dataset.csv.bz2'''
with open(a_ , '''rb''') as f:
lowerCamelCase :Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a_ , '''wb''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Dict , a_ : List[str] , a_ : str):
lowerCamelCase :Optional[Any] = tmp_path_factory.mktemp('''data''') / '''dataset.csv.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename(a_))
f.write(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Dict , a_ : Optional[Any] , a_ : List[str]):
lowerCamelCase :Any = tmp_path_factory.mktemp('''data''') / '''dataset.csv.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''')))
f.write(a_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''')))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Any , a_ : str , a_ : str):
lowerCamelCase :List[str] = tmp_path_factory.mktemp('''data''') / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.join('''main_dir''' , os.path.basename(a_)))
f.write(a_ , arcname=os.path.join('''main_dir''' , os.path.basename(a_)))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Optional[int]):
lowerCamelCase :Tuple = str(tmp_path_factory.mktemp('''data''') / '''dataset.parquet''')
lowerCamelCase :Any = pa.schema(
{
'''col_1''': pa.string(),
'''col_2''': pa.intaa(),
'''col_3''': pa.floataa(),
})
with open(a_ , '''wb''') as f:
lowerCamelCase :Dict = pq.ParquetWriter(a_ , schema=a_)
lowerCamelCase :Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_))] for k in DATA[0]} , schema=a_)
writer.write_table(a_)
writer.close()
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : str):
lowerCamelCase :List[str] = str(tmp_path_factory.mktemp('''data''') / '''dataset.json''')
lowerCamelCase :int = {'''data''': DATA}
with open(a_ , '''w''') as f:
json.dump(a_ , a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Optional[int] = str(tmp_path_factory.mktemp('''data''') / '''dataset.json''')
lowerCamelCase :str = {'''data''': DATA_DICT_OF_LISTS}
with open(a_ , '''w''') as f:
json.dump(a_ , a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Optional[Any] = str(tmp_path_factory.mktemp('''data''') / '''dataset.jsonl''')
with open(a_ , '''w''') as f:
for item in DATA:
f.write(json.dumps(a_) + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[str]):
lowerCamelCase :int = str(tmp_path_factory.mktemp('''data''') / '''dataset2.jsonl''')
with open(a_ , '''w''') as f:
for item in DATA:
f.write(json.dumps(a_) + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Union[str, Any]):
lowerCamelCase :Dict = str(tmp_path_factory.mktemp('''data''') / '''dataset_312.jsonl''')
with open(a_ , '''w''') as f:
for item in DATA_312:
f.write(json.dumps(a_) + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any]):
lowerCamelCase :Dict = str(tmp_path_factory.mktemp('''data''') / '''dataset-str.jsonl''')
with open(a_ , '''w''') as f:
for item in DATA_STR:
f.write(json.dumps(a_) + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Any , a_ : str):
import gzip
lowerCamelCase :int = str(tmp_path_factory.mktemp('''data''') / '''dataset.txt.gz''')
with open(a_ , '''rb''') as orig_file:
with gzip.open(a_ , '''wb''') as zipped_file:
zipped_file.writelines(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[str] , a_ : str):
import gzip
lowerCamelCase :List[Any] = str(tmp_path_factory.mktemp('''data''') / '''dataset.jsonl.gz''')
with open(a_ , '''rb''') as orig_file:
with gzip.open(a_ , '''wb''') as zipped_file:
zipped_file.writelines(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : str , a_ : Optional[Any] , a_ : Optional[int]):
lowerCamelCase :Dict = tmp_path_factory.mktemp('''data''') / '''dataset.jsonl.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename(a_))
f.write(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Any , a_ : List[str] , a_ : Tuple , a_ : Tuple):
lowerCamelCase :List[str] = tmp_path_factory.mktemp('''data''') / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.join('''nested''' , os.path.basename(a_)))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Optional[int] , a_ : List[str] , a_ : Dict):
lowerCamelCase :List[Any] = tmp_path_factory.mktemp('''data''') / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.join('''main_dir''' , os.path.basename(a_)))
f.write(a_ , arcname=os.path.join('''main_dir''' , os.path.basename(a_)))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[Any]):
lowerCamelCase :List[Any] = tmp_path_factory.mktemp('''data''') / '''dataset.jsonl.tar'''
with tarfile.TarFile(a_ , '''w''') as f:
f.add(a_ , arcname=os.path.basename(a_))
f.add(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : List[Any] , a_ : Optional[int] , a_ : Union[str, Any]):
lowerCamelCase :Union[str, Any] = tmp_path_factory.mktemp('''data''') / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(a_ , '''w''') as f:
f.add(a_ , arcname=os.path.join('''nested''' , os.path.basename(a_)))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Dict):
lowerCamelCase :List[str] = ['''0''', '''1''', '''2''', '''3''']
lowerCamelCase :str = str(tmp_path_factory.mktemp('''data''') / '''dataset.txt''')
with open(a_ , '''w''') as f:
for item in data:
f.write(item + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Optional[int] = ['''0''', '''1''', '''2''', '''3''']
lowerCamelCase :Any = str(tmp_path_factory.mktemp('''data''') / '''dataset2.txt''')
with open(a_ , '''w''') as f:
for item in data:
f.write(item + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any]):
lowerCamelCase :Optional[Any] = ['''0''', '''1''', '''2''', '''3''']
lowerCamelCase :Any = tmp_path_factory.mktemp('''data''') / '''dataset.abc'''
with open(a_ , '''w''') as f:
for item in data:
f.write(item + '''\n''')
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : Optional[int]):
lowerCamelCase :Dict = tmp_path_factory.mktemp('''data''') / '''dataset.text.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename(a_))
f.write(a_ , arcname=os.path.basename(a_))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[str] , a_ : Union[str, Any] , a_ : Optional[int]):
lowerCamelCase :int = tmp_path_factory.mktemp('''data''') / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.join('''main_dir''' , os.path.basename(a_)))
f.write(a_ , arcname=os.path.join('''main_dir''' , os.path.basename(a_)))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : int , a_ : Optional[int] , a_ : Union[str, Any]):
lowerCamelCase :int = tmp_path_factory.mktemp('''data''') / '''dataset.ext.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename('''unsupported.ext'''))
f.write(a_ , arcname=os.path.basename('''unsupported_2.ext'''))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Dict):
lowerCamelCase :Optional[int] = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''])
lowerCamelCase :int = str(tmp_path_factory.mktemp('''data''') / '''dataset_with_unicode_new_lines.txt''')
with open(a_ , '''w''' , encoding='''utf-8''') as f:
f.write(a_)
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''')
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''')
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : Tuple , a_ : Union[str, Any]):
lowerCamelCase :Any = tmp_path_factory.mktemp('''data''') / '''dataset.img.zip'''
with zipfile.ZipFile(a_ , '''w''') as f:
f.write(a_ , arcname=os.path.basename(a_))
f.write(a_ , arcname=os.path.basename(a_).replace('''.jpg''' , '''2.jpg'''))
return path
@pytest.fixture(scope='''session''')
def _lowerCamelCase ( a_ : List[Any]):
lowerCamelCase :List[str] = tmp_path_factory.mktemp('''data_dir''')
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''') as f:
f.write('''foo\n''' * 10)
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''') as f:
f.write('''bar\n''' * 10)
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''') as f:
f.write('''bar\n''' * 10)
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''') as f:
f.write('''foo\n''' * 10)
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''') as f:
f.write('''bar\n''' * 10)
return data_dir
| 49
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 1
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : int , __snake_case : Optional[int]=14 , __snake_case : Dict=7 , __snake_case : str=True , __snake_case : int=True , __snake_case : Optional[int]=True , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=99 , __snake_case : Dict=32 , __snake_case : List[Any]=5 , __snake_case : Tuple=4 , __snake_case : List[str]=37 , __snake_case : str="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : List[Any]=512 , __snake_case : Any=16 , __snake_case : int=2 , __snake_case : Union[str, Any]=0.0_2 , __snake_case : Optional[int]=3 , __snake_case : Any=4 , __snake_case : str=None , ):
lowerCamelCase :Any = parent
lowerCamelCase :List[str] = batch_size
lowerCamelCase :Union[str, Any] = seq_length
lowerCamelCase :int = is_training
lowerCamelCase :Optional[int] = use_token_type_ids
lowerCamelCase :Optional[int] = use_input_mask
lowerCamelCase :Dict = use_labels
lowerCamelCase :Any = use_mc_token_ids
lowerCamelCase :str = vocab_size
lowerCamelCase :Optional[int] = hidden_size
lowerCamelCase :str = num_hidden_layers
lowerCamelCase :Union[str, Any] = num_attention_heads
lowerCamelCase :Optional[Any] = intermediate_size
lowerCamelCase :Dict = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Optional[Any] = attention_probs_dropout_prob
lowerCamelCase :Optional[int] = max_position_embeddings
lowerCamelCase :Tuple = type_vocab_size
lowerCamelCase :int = type_sequence_label_size
lowerCamelCase :Any = initializer_range
lowerCamelCase :List[str] = num_labels
lowerCamelCase :Any = num_choices
lowerCamelCase :Union[str, Any] = scope
lowerCamelCase :int = self.vocab_size - 1
def snake_case ( self : int ):
lowerCamelCase :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase :str = None
if self.use_input_mask:
lowerCamelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase :int = None
if self.use_token_type_ids:
lowerCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase :Tuple = None
if self.use_mc_token_ids:
lowerCamelCase :Tuple = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCamelCase :List[str] = None
lowerCamelCase :Any = None
lowerCamelCase :List[Any] = None
if self.use_labels:
lowerCamelCase :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase :Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase :Tuple = self.get_config()
lowerCamelCase :List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case ( self : Any ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case ( self : str , __snake_case : List[str] , __snake_case : int , __snake_case : Any , __snake_case : List[str] , __snake_case : Dict , *__snake_case : List[str] ):
lowerCamelCase :Dict = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case , token_type_ids=__snake_case , head_mask=__snake_case )
model(__snake_case , token_type_ids=__snake_case )
lowerCamelCase :Dict = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def snake_case ( self : str , __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Dict , *__snake_case : str ):
lowerCamelCase :List[str] = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Optional[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) :Optional[int] = config_and_inputs
lowerCamelCase :str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def snake_case ( self : int , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : int , __snake_case : List[Any] , *__snake_case : str ):
lowerCamelCase :Tuple = self.num_labels
lowerCamelCase :Optional[int] = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase :List[str] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_UpperCAmelCase = (CTRLLMHeadModel,) if is_torch_available() else ()
_UpperCAmelCase = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : str , __snake_case : int , __snake_case : int , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[int] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case ( self : str ):
lowerCamelCase :List[str] = CTRLModelTester(self )
lowerCamelCase :Tuple = ConfigTester(self , config_class=__snake_case , n_embd=37 )
def snake_case ( self : str ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : List[str] ):
lowerCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Union[str, Any] ):
pass
@slow
def snake_case ( self : Union[str, Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Dict = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case ( self : List[str] ):
pass
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(__snake_case )
lowerCamelCase :Any = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=__snake_case ) # Legal the president is
lowerCamelCase :List[str] = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCamelCase :int = model.generate(__snake_case , do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() , __snake_case )
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : List[str] ):
lowerCamelCase :int = torch.nn.Linear(10 , 10 )
lowerCamelCase :str = torch.optim.SGD(model.parameters() , 0.1 )
lowerCamelCase :List[str] = Accelerator()
lowerCamelCase :Union[str, Any] = accelerator.prepare(__snake_case )
try:
pickle.loads(pickle.dumps(__snake_case ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 49
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A__ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = CycleDiffusionPipeline
_UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'latents'}
_UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
_UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCamelCase :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCamelCase :str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
lowerCamelCase :Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase :List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase :Optional[int] = CLIPTextModel(__snake_case )
lowerCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase :Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Any=0 ):
lowerCamelCase :Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :int = image / 2 + 0.5
if str(__snake_case ).startswith('''mps''' ):
lowerCamelCase :Union[str, Any] = torch.manual_seed(__snake_case )
else:
lowerCamelCase :List[str] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCamelCase :str = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase :int = self.get_dummy_components()
lowerCamelCase :Optional[int] = CycleDiffusionPipeline(**__snake_case )
lowerCamelCase :Tuple = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :int = self.get_dummy_inputs(__snake_case )
lowerCamelCase :Optional[Any] = pipe(**__snake_case )
lowerCamelCase :int = output.images
lowerCamelCase :Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCamelCase :Any = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def snake_case ( self : List[Any] ):
lowerCamelCase :Union[str, Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(__snake_case , '''half''' ):
lowerCamelCase :Tuple = module.half()
lowerCamelCase :List[str] = CycleDiffusionPipeline(**__snake_case )
lowerCamelCase :Any = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :List[Any] = self.get_dummy_inputs(__snake_case )
lowerCamelCase :Any = pipe(**__snake_case )
lowerCamelCase :Dict = output.images
lowerCamelCase :List[str] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCamelCase :Optional[int] = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case ( self : Optional[Any] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def snake_case ( self : Optional[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def snake_case ( self : Union[str, Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def snake_case ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def snake_case ( self : Any ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
lowerCamelCase :Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
lowerCamelCase :Optional[Any] = init_image.resize((512, 512) )
lowerCamelCase :List[Any] = '''CompVis/stable-diffusion-v1-4'''
lowerCamelCase :List[Any] = DDIMScheduler.from_pretrained(__snake_case , subfolder='''scheduler''' )
lowerCamelCase :Dict = CycleDiffusionPipeline.from_pretrained(
__snake_case , scheduler=__snake_case , safety_checker=__snake_case , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
lowerCamelCase :Union[str, Any] = '''A black colored car'''
lowerCamelCase :List[Any] = '''A blue colored car'''
lowerCamelCase :Optional[Any] = torch.manual_seed(0 )
lowerCamelCase :str = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :Optional[Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def snake_case ( self : str ):
lowerCamelCase :List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
lowerCamelCase :Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
lowerCamelCase :Tuple = init_image.resize((512, 512) )
lowerCamelCase :List[str] = '''CompVis/stable-diffusion-v1-4'''
lowerCamelCase :Any = DDIMScheduler.from_pretrained(__snake_case , subfolder='''scheduler''' )
lowerCamelCase :Optional[Any] = CycleDiffusionPipeline.from_pretrained(__snake_case , scheduler=__snake_case , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
lowerCamelCase :Optional[int] = '''A black colored car'''
lowerCamelCase :Optional[int] = '''A blue colored car'''
lowerCamelCase :Optional[Any] = torch.manual_seed(0 )
lowerCamelCase :str = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :Optional[Any] = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 49
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ = logging.get_logger(__name__)
A__ = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'resnet'
_UpperCAmelCase = ['basic', 'bottleneck']
def __init__( self : Optional[int] , __snake_case : List[Any]=3 , __snake_case : Union[str, Any]=64 , __snake_case : List[Any]=[256, 512, 1024, 2048] , __snake_case : List[Any]=[3, 4, 6, 3] , __snake_case : int="bottleneck" , __snake_case : Optional[int]="relu" , __snake_case : str=False , __snake_case : int=None , __snake_case : Optional[Any]=None , **__snake_case : str , ):
super().__init__(**__snake_case )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
lowerCamelCase :List[str] = num_channels
lowerCamelCase :Dict = embedding_size
lowerCamelCase :Any = hidden_sizes
lowerCamelCase :Union[str, Any] = depths
lowerCamelCase :str = layer_type
lowerCamelCase :Any = hidden_act
lowerCamelCase :Optional[int] = downsample_in_first_stage
lowerCamelCase :List[Any] = ['''stem'''] + [F"stage{idx}" for idx in range(1 , len(__snake_case ) + 1 )]
lowerCamelCase , lowerCamelCase :str = get_aligned_output_features_output_indices(
out_features=__snake_case , out_indices=__snake_case , stage_names=self.stage_names )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = version.parse('1.11' )
@property
def snake_case ( self : List[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case ( self : str ):
return 1e-3
| 49
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 1
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A__ = logging.get_logger(__name__)
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : Optional[Any] ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 49
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
| 49
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 42
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : Union[str, Any] , __snake_case : int = 3 , __snake_case : int = 3 , __snake_case : Tuple[str] = ("DownEncoderBlock2D",) , __snake_case : Tuple[str] = ("UpDecoderBlock2D",) , __snake_case : Tuple[int] = (64,) , __snake_case : int = 1 , __snake_case : str = "silu" , __snake_case : int = 3 , __snake_case : int = 32 , __snake_case : int = 256 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : float = 0.1_8_2_1_5 , __snake_case : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase :Tuple = Encoder(
in_channels=__snake_case , out_channels=__snake_case , down_block_types=__snake_case , block_out_channels=__snake_case , layers_per_block=__snake_case , act_fn=__snake_case , norm_num_groups=__snake_case , double_z=__snake_case , )
lowerCamelCase :str = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase :List[Any] = nn.Convad(__snake_case , __snake_case , 1 )
lowerCamelCase :str = VectorQuantizer(__snake_case , __snake_case , beta=0.2_5 , remap=__snake_case , sane_index_shape=__snake_case )
lowerCamelCase :List[str] = nn.Convad(__snake_case , __snake_case , 1 )
# pass init params to Decoder
lowerCamelCase :str = Decoder(
in_channels=__snake_case , out_channels=__snake_case , up_block_types=__snake_case , block_out_channels=__snake_case , layers_per_block=__snake_case , act_fn=__snake_case , norm_num_groups=__snake_case , norm_type=__snake_case , )
@apply_forward_hook
def snake_case ( self : Tuple , __snake_case : torch.FloatTensor , __snake_case : bool = True ):
lowerCamelCase :str = self.encoder(__snake_case )
lowerCamelCase :int = self.quant_conv(__snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__snake_case )
@apply_forward_hook
def snake_case ( self : Tuple , __snake_case : torch.FloatTensor , __snake_case : bool = False , __snake_case : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase , lowerCamelCase , lowerCamelCase :List[str] = self.quantize(__snake_case )
else:
lowerCamelCase :Any = h
lowerCamelCase :List[str] = self.post_quant_conv(__snake_case )
lowerCamelCase :int = self.decoder(__snake_case , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__snake_case )
def snake_case ( self : str , __snake_case : torch.FloatTensor , __snake_case : bool = True ):
lowerCamelCase :Union[str, Any] = sample
lowerCamelCase :Optional[int] = self.encode(__snake_case ).latents
lowerCamelCase :List[Any] = self.decode(__snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__snake_case )
| 49
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 1
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _lowerCamelCase ( a_ : list[list[float]]):
lowerCamelCase :Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(a_) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
lowerCamelCase :Optional[int] = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError('''This matrix has no inverse.''')
# Creates a copy of the matrix with swapped positions of the elements
lowerCamelCase :List[Any] = [[0.0, 0.0], [0.0, 0.0]]
lowerCamelCase , lowerCamelCase :Union[str, Any] = matrix[1][1], matrix[0][0]
lowerCamelCase , lowerCamelCase :Optional[int] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(a_)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(a_) == 3
and len(matrix[0]) == 3
and len(matrix[1]) == 3
and len(matrix[2]) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCamelCase :List[Any] = float(
(
(d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
)
- (
(d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
))
if determinant == 0:
raise ValueError('''This matrix has no inverse.''')
# Creating cofactor matrix
lowerCamelCase :Any = [
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
]
lowerCamelCase :int = (d(matrix[1][1]) * d(matrix[2][2])) - (
d(matrix[1][2]) * d(matrix[2][1])
)
lowerCamelCase :Any = -(
(d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
)
lowerCamelCase :List[Any] = (d(matrix[1][0]) * d(matrix[2][1])) - (
d(matrix[1][1]) * d(matrix[2][0])
)
lowerCamelCase :List[Any] = -(
(d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
)
lowerCamelCase :Optional[int] = (d(matrix[0][0]) * d(matrix[2][2])) - (
d(matrix[0][2]) * d(matrix[2][0])
)
lowerCamelCase :Optional[Any] = -(
(d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
)
lowerCamelCase :Dict = (d(matrix[0][1]) * d(matrix[1][2])) - (
d(matrix[0][2]) * d(matrix[1][1])
)
lowerCamelCase :Tuple = -(
(d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
)
lowerCamelCase :List[str] = (d(matrix[0][0]) * d(matrix[1][1])) - (
d(matrix[0][1]) * d(matrix[1][0])
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCamelCase :int = array(a_)
for i in range(3):
for j in range(3):
lowerCamelCase :List[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCamelCase :Optional[Any] = array(a_)
for i in range(3):
for j in range(3):
inverse_matrix[i][j] /= d(a_)
# Calculate the inverse of the matrix
return [[float(d(a_)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
| 49
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A__ = logging.getLogger(__name__)
def _lowerCamelCase ( a_ : Tuple=2 , a_ : Optional[int]=3 , a_ : Tuple=16 , a_ : int = 10 , a_ : int = 2):
def get_dataset(a_ : List[str]):
lowerCamelCase :int = torch.randn(batch_size * n_batches , 1)
return TensorDataset(a_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1))
lowerCamelCase :List[Any] = get_dataset(a_)
lowerCamelCase :Dict = get_dataset(a_)
lowerCamelCase :Optional[Any] = DataLoader(a_ , shuffle=a_ , batch_size=a_ , num_workers=4)
lowerCamelCase :int = DataLoader(a_ , shuffle=a_ , batch_size=a_ , num_workers=4)
return (train_dataloader, valid_dataloader)
def _lowerCamelCase ( a_ : Any , a_ : Dict , a_ : str , a_ : List[str] , a_ : Optional[int] , a_ : Union[str, Any]=None):
lowerCamelCase :int = []
for epoch in range(a_):
# Train quickly
model.train()
for batch in dataloader:
lowerCamelCase , lowerCamelCase :List[Any] = batch
lowerCamelCase :List[Any] = model(a_)
lowerCamelCase :Dict = torch.nn.functional.mse_loss(a_ , a_)
accelerator.backward(a_)
optimizer.step()
optimizer.zero_grad()
rands.append(random.random()) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[int] ):
super().__init__()
lowerCamelCase :Optional[int] = nn.Parameter(torch.randn(1 ) )
lowerCamelCase :List[Any] = nn.Parameter(torch.randn(1 ) )
def snake_case ( self : Optional[Any] , __snake_case : List[str] ):
return x * self.a + self.b
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCamelCase :Optional[Any] = DummyModel()
lowerCamelCase :Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase :str = dummy_dataloaders()
lowerCamelCase :List[Any] = ProjectConfiguration(total_limit=1 , project_dir=__snake_case , automatic_checkpoint_naming=__snake_case )
# Train baseline
lowerCamelCase :List[str] = Accelerator(project_config=__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Dict = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def snake_case ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCamelCase :int = DummyModel()
lowerCamelCase :str = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase :int = dummy_dataloaders()
# Train baseline
lowerCamelCase :Optional[Any] = Accelerator()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
lowerCamelCase :Optional[int] = os.path.join(__snake_case , '''initial''' )
accelerator.save_state(__snake_case )
((lowerCamelCase) , (lowerCamelCase)) :Union[str, Any] = model.a.item(), model.b.item()
lowerCamelCase :List[str] = optimizer.state_dict()
lowerCamelCase :Union[str, Any] = train(3 , __snake_case , __snake_case , __snake_case , __snake_case )
((lowerCamelCase) , (lowerCamelCase)) :Tuple = model.a.item(), model.b.item()
lowerCamelCase :str = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCamelCase :Union[str, Any] = DummyModel()
lowerCamelCase :int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase :Optional[Any] = dummy_dataloaders()
lowerCamelCase :Optional[Any] = Accelerator()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Dict = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.load_state(__snake_case )
((lowerCamelCase) , (lowerCamelCase)) :Union[str, Any] = model.a.item(), model.b.item()
lowerCamelCase :List[Any] = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
lowerCamelCase :List[Any] = train(2 , __snake_case , __snake_case , __snake_case , __snake_case )
# Save everything
lowerCamelCase :int = os.path.join(__snake_case , '''checkpoint''' )
accelerator.save_state(__snake_case )
# Load everything back in and make sure all states work
accelerator.load_state(__snake_case )
test_rands += train(1 , __snake_case , __snake_case , __snake_case , __snake_case )
((lowerCamelCase) , (lowerCamelCase)) :Union[str, Any] = model.a.item(), model.b.item()
lowerCamelCase :str = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCamelCase :str = DummyModel()
lowerCamelCase :List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase :Union[str, Any] = dummy_dataloaders()
lowerCamelCase :Any = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
lowerCamelCase :int = Accelerator(project_dir=__snake_case , project_config=__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
accelerator.save_state()
((lowerCamelCase) , (lowerCamelCase)) :List[Any] = model.a.item(), model.b.item()
lowerCamelCase :Any = optimizer.state_dict()
lowerCamelCase :List[Any] = train(3 , __snake_case , __snake_case , __snake_case , __snake_case )
((lowerCamelCase) , (lowerCamelCase)) :List[Any] = model.a.item(), model.b.item()
lowerCamelCase :Dict = optimizer.state_dict()
# Train partially
set_seed(42 )
lowerCamelCase :Dict = DummyModel()
lowerCamelCase :Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase , lowerCamelCase :Optional[Any] = dummy_dataloaders()
lowerCamelCase :Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__snake_case )
lowerCamelCase :List[str] = Accelerator(project_dir=__snake_case , project_config=__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Tuple = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.load_state(os.path.join(__snake_case , '''checkpoints''' , '''checkpoint_0''' ) )
((lowerCamelCase) , (lowerCamelCase)) :int = model.a.item(), model.b.item()
lowerCamelCase :Any = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = train(2 , __snake_case , __snake_case , __snake_case , __snake_case )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , __snake_case , __snake_case , __snake_case , __snake_case )
((lowerCamelCase) , (lowerCamelCase)) :Any = model.a.item(), model.b.item()
lowerCamelCase :int = optimizer.state_dict()
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :str = torch.tensor([1, 2, 3] )
lowerCamelCase :int = torch.tensor([2, 3, 4] )
lowerCamelCase :Union[str, Any] = DummyModel()
lowerCamelCase :Any = torch.optim.Adam(net.parameters() )
lowerCamelCase :Union[str, Any] = Accelerator()
with self.assertRaises(__snake_case ) as ve:
accelerator.register_for_checkpointing(__snake_case , __snake_case , __snake_case , __snake_case )
lowerCamelCase :Union[str, Any] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCamelCase :List[str] = DummyModel()
lowerCamelCase :Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowerCamelCase :Optional[int] = torch.optim.lr_scheduler.StepLR(__snake_case , step_size=1 , gamma=0.9_9 )
lowerCamelCase , lowerCamelCase :List[Any] = dummy_dataloaders()
lowerCamelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
lowerCamelCase :List[Any] = Accelerator(project_dir=__snake_case , project_config=__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Save initial
accelerator.save_state()
lowerCamelCase :Optional[Any] = scheduler.state_dict()
train(3 , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
self.assertNotEqual(__snake_case , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(__snake_case , scheduler.state_dict() )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowerCamelCase :int = DummyModel()
lowerCamelCase :Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=__snake_case , total_limit=2 )
# Train baseline
lowerCamelCase :Dict = Accelerator(project_dir=__snake_case , project_config=__snake_case )
lowerCamelCase :Tuple = accelerator.prepare(__snake_case )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__snake_case , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def snake_case ( self : str ):
lowerCamelCase :List[str] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(__snake_case , env=os.environ.copy() )
if __name__ == "__main__":
A__ = """/tmp/accelerate/state_checkpointing"""
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A__ , A__ = dummy_dataloaders()
A__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A__ , A__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A__ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
A__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
A__ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
A__ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 49
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 1
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _lowerCamelCase ( a_ : str):
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set())
@pytest.fixture
def _lowerCamelCase ( a_ : Dict):
class _lowerCAmelCase :
def __init__( self : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :Tuple = metric_id
class _lowerCAmelCase :
_UpperCAmelCase = [MetricMock(__SCREAMING_SNAKE_CASE ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def snake_case ( self : Tuple ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock())
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))])
def _lowerCamelCase ( a_ : Tuple , a_ : Dict , a_ : Union[str, Any] , a_ : str , a_ : Optional[int]):
if "tmp_path" in args:
lowerCamelCase :Any = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
with pytest.warns(a_ , match='''https://huggingface.co/docs/evaluate'''):
func(*a_)
| 49
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'megatron-bert'
def __init__( self : List[str] , __snake_case : int=29056 , __snake_case : List[Any]=1024 , __snake_case : str=24 , __snake_case : Dict=16 , __snake_case : Union[str, Any]=4096 , __snake_case : List[Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : int=0.1 , __snake_case : str=512 , __snake_case : str=2 , __snake_case : List[str]=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : int=0 , __snake_case : Tuple="absolute" , __snake_case : List[Any]=True , **__snake_case : List[str] , ):
super().__init__(pad_token_id=__snake_case , **__snake_case )
lowerCamelCase :List[Any] = vocab_size
lowerCamelCase :str = hidden_size
lowerCamelCase :Any = num_hidden_layers
lowerCamelCase :List[Any] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :str = intermediate_size
lowerCamelCase :Optional[Any] = hidden_dropout_prob
lowerCamelCase :List[str] = attention_probs_dropout_prob
lowerCamelCase :int = max_position_embeddings
lowerCamelCase :Dict = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :List[str] = layer_norm_eps
lowerCamelCase :Optional[int] = position_embedding_type
lowerCamelCase :Optional[Any] = use_cache
| 49
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 1
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A__ = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any=None , __snake_case : int=1 ):
lowerCamelCase :Optional[Any] = tokenizer
lowerCamelCase :Dict = dataset
lowerCamelCase :Tuple = len(__snake_case ) if n_tasks is None else n_tasks
lowerCamelCase :List[str] = n_copies
def __iter__( self : Tuple ):
lowerCamelCase :Dict = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
lowerCamelCase :List[str] = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : int ):
lowerCamelCase :Dict = start_length
lowerCamelCase :Any = eof_strings
lowerCamelCase :int = tokenizer
def __call__( self : Any , __snake_case : str , __snake_case : Dict , **__snake_case : List[str] ):
lowerCamelCase :Union[str, Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase :Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :Tuple = re.split('''(%s)''' % '''|'''.join(a_) , a_)
# last string should be ""
return "".join(string_list[:-2])
def _lowerCamelCase ( a_ : List[str] , a_ : str , a_ : Optional[int] , a_ : str , a_ : Optional[int] , a_ : Tuple=20 , **a_ : int):
lowerCamelCase :Dict = defaultdict(a_) # dict of list of generated tokens
for step, batch in tqdm(enumerate(a_)):
with torch.no_grad():
lowerCamelCase :Optional[int] = batch['''ids'''].shape[-1]
lowerCamelCase :Union[str, Any] = accelerator.unwrap_model(a_).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=a_ , **a_)
# each task is generated batch_size times
lowerCamelCase :Any = batch['''task_id'''].repeat(a_)
lowerCamelCase :Tuple = accelerator.pad_across_processes(
a_ , dim=1 , pad_index=tokenizer.pad_token_id)
lowerCamelCase , lowerCamelCase :List[str] = accelerator.gather((generated_tokens, generated_tasks))
lowerCamelCase :Tuple = generated_tokens.cpu().numpy()
lowerCamelCase :Tuple = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(a_ , a_):
gen_token_dict[task].append(a_)
lowerCamelCase :Optional[Any] = [[] for _ in range(a_)]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase :List[str] = tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_)
code_gens[task].append(remove_last_block(a_))
return code_gens
def _lowerCamelCase ( ):
# Setup configuration
lowerCamelCase :int = HfArgumentParser(a_)
lowerCamelCase :Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase :Optional[int] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase :str = '''false'''
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase :Union[str, Any] = Accelerator()
set_seed(args.seed , device_specific=a_)
# Load model and tokenizer
lowerCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
lowerCamelCase :str = tokenizer.eos_token
lowerCamelCase :str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
# Generation settings
lowerCamelCase :Any = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , a_ , a_)]),
}
# Load evaluation dataset and metric
lowerCamelCase :int = load_dataset('''openai_humaneval''')
lowerCamelCase :Dict = load_metric('''code_eval''')
lowerCamelCase :Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
lowerCamelCase :Union[str, Any] = args.n_samples // args.batch_size
lowerCamelCase :Union[str, Any] = TokenizedDataset(a_ , human_eval['''test'''] , n_copies=a_ , n_tasks=a_)
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase :Dict = DataLoader(a_ , batch_size=1)
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase :Optional[int] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']])
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''')
raise exception
lowerCamelCase , lowerCamelCase :Union[str, Any] = accelerator.prepare(a_ , a_)
lowerCamelCase :int = complete_code(
a_ , a_ , a_ , a_ , n_tasks=a_ , batch_size=args.batch_size , **a_ , )
if accelerator.is_main_process:
lowerCamelCase :Dict = []
for task in tqdm(range(a_)):
lowerCamelCase :Tuple = human_eval['''test'''][task]['''test''']
lowerCamelCase :int = F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point)
# Evaluate completions with "code_eval" metric
lowerCamelCase , lowerCamelCase :Any = code_eval_metric.compute(
references=a_ , predictions=a_ , num_workers=args.num_workers)
print(F"Results: {pass_at_k}")
# Save results to json file
with open(args.output_file , '''w''') as fp:
json.dump(a_ , a_)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 49
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Optional[int] , __snake_case : int , __snake_case : int ):
lowerCamelCase :Dict = jnp.ones((batch_size, length) ) / length
return scores
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = None
lowerCamelCase :Dict = 20
lowerCamelCase :Any = self._get_uniform_logits(batch_size=2 , length=__snake_case )
# tweak scores to not be uniform anymore
lowerCamelCase :Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase :Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase :Tuple = jax.nn.softmax(__snake_case , axis=-1 )
lowerCamelCase :List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase :Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase :Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(__snake_case , scores.copy() , cur_len=__snake_case ) , axis=-1 )
lowerCamelCase :Any = jax.nn.softmax(temp_dist_warper_smoother(__snake_case , scores.copy() , cur_len=__snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = None
lowerCamelCase :Optional[int] = 10
lowerCamelCase :str = 2
# create ramp distribution
lowerCamelCase :str = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase :List[str] = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase :Optional[int] = FlaxTopKLogitsWarper(3 )
lowerCamelCase :int = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase :Union[str, Any] = 5
lowerCamelCase :List[str] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase :Union[str, Any] = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, length) ).copy()
lowerCamelCase :List[Any] = top_k_warp_safety_check(__snake_case , __snake_case , cur_len=__snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case ( self : Any ):
lowerCamelCase :List[str] = None
lowerCamelCase :Any = 10
lowerCamelCase :Tuple = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase :Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase :int = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase :List[Any] = np.exp(top_p_warp(__snake_case , __snake_case , cur_len=__snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase :Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase :int = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase :int = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase :Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase :List[Any] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[Any] = 20
lowerCamelCase :Optional[int] = 4
lowerCamelCase :List[str] = 0
lowerCamelCase :int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
# check that min length is applied at length 5
lowerCamelCase :int = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase :int = 5
lowerCamelCase :List[Any] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :List[Any] = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase :Optional[Any] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :Any = 15
lowerCamelCase :Any = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def snake_case ( self : str ):
lowerCamelCase :Any = 20
lowerCamelCase :List[Any] = 4
lowerCamelCase :List[str] = 0
lowerCamelCase :Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase :Any = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase :List[str] = 1
lowerCamelCase :Dict = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :int = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase :Optional[Any] = 3
lowerCamelCase :Union[str, Any] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :int = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def snake_case ( self : Dict ):
lowerCamelCase :Optional[Any] = 20
lowerCamelCase :List[str] = 4
lowerCamelCase :Any = 0
lowerCamelCase :Union[str, Any] = 5
lowerCamelCase :Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase :int = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase :Tuple = 4
lowerCamelCase :List[str] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :List[str] = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase :Optional[int] = 3
lowerCamelCase :str = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :str = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = 4
lowerCamelCase :Dict = 10
lowerCamelCase :int = 15
lowerCamelCase :Any = 2
lowerCamelCase :Dict = 1
lowerCamelCase :Union[str, Any] = 15
# dummy input_ids and scores
lowerCamelCase :int = ids_tensor((batch_size, sequence_length) , __snake_case )
lowerCamelCase :int = input_ids.copy()
lowerCamelCase :Tuple = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :str = scores.copy()
# instantiate all dist processors
lowerCamelCase :List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase :str = FlaxTopKLogitsWarper(3 )
lowerCamelCase :Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase :int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
lowerCamelCase :Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
lowerCamelCase :Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
lowerCamelCase :Tuple = 10
# no processor list
lowerCamelCase :Dict = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Tuple = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Optional[Any] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Dict = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :int = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Union[str, Any] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
# with processor list
lowerCamelCase :Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase :int = processor(__snake_case , __snake_case , cur_len=__snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = 4
lowerCamelCase :Optional[int] = 10
lowerCamelCase :int = 15
lowerCamelCase :List[str] = 2
lowerCamelCase :Optional[int] = 1
lowerCamelCase :Tuple = 15
# dummy input_ids and scores
lowerCamelCase :Tuple = ids_tensor((batch_size, sequence_length) , __snake_case )
lowerCamelCase :List[str] = input_ids.copy()
lowerCamelCase :List[str] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :Dict = scores.copy()
# instantiate all dist processors
lowerCamelCase :List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase :Union[str, Any] = FlaxTopKLogitsWarper(3 )
lowerCamelCase :Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase :Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
lowerCamelCase :Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
lowerCamelCase :int = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
lowerCamelCase :Any = 10
# no processor list
def run_no_processor_list(__snake_case : int , __snake_case : Any , __snake_case : Optional[Any] ):
lowerCamelCase :Dict = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :int = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :str = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :List[Any] = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Dict = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Optional[int] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
return scores
# with processor list
def run_processor_list(__snake_case : Union[str, Any] , __snake_case : int , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase :List[str] = processor(__snake_case , __snake_case , cur_len=__snake_case )
return scores
lowerCamelCase :Optional[Any] = jax.jit(__snake_case )
lowerCamelCase :Tuple = jax.jit(__snake_case )
lowerCamelCase :List[str] = jitted_run_no_processor_list(__snake_case , __snake_case , __snake_case )
lowerCamelCase :int = jitted_run_processor_list(__snake_case , __snake_case , __snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 49
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 1
|
import operator as op
A__ = """scaler.pt"""
A__ = """pytorch_model"""
A__ = """random_states"""
A__ = """optimizer"""
A__ = """scheduler"""
A__ = """pytorch_model.bin"""
A__ = """pytorch_model.bin.index.json"""
A__ = """model.safetensors"""
A__ = """model.safetensors.index.json"""
A__ = """1.10.2"""
A__ = """py38"""
A__ = """4.17.0"""
A__ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
A__ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
A__ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
A__ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
A__ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
A__ = """2.0.1"""
A__ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
A__ = ["""default""", """reduce-overhead""", """max-autotune"""]
A__ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
A__ = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
A__ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
A__ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 49
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , __snake_case : Dict , __snake_case : Tuple ):
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Any , __snake_case : int = 1 , __snake_case : int = 100 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[float] = None , __snake_case : bool = True , ):
if audio_length_in_s is None:
lowerCamelCase :Union[str, Any] = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase :List[str] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase :int = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
lowerCamelCase :Union[str, Any] = int(__snake_case )
if sample_size % down_scale_factor != 0:
lowerCamelCase :Dict = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
''' process.''' )
lowerCamelCase :str = int(__snake_case )
lowerCamelCase :List[Any] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase :Tuple = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
lowerCamelCase :str = randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
# set step values
self.scheduler.set_timesteps(__snake_case , device=audio.device )
lowerCamelCase :Optional[Any] = self.scheduler.timesteps.to(__snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase :List[Any] = self.unet(__snake_case , __snake_case ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase :Any = self.scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
lowerCamelCase :List[str] = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowerCamelCase :Optional[Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__snake_case )
| 49
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
| 49
| 1
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
from math import pi, sqrt, tan
def _lowerCamelCase ( a_ : float):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''')
return 6 * side_length**2
def _lowerCamelCase ( a_ : float , a_ : float , a_ : float):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''')
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _lowerCamelCase ( a_ : float):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''')
return 4 * pi * radius**2
def _lowerCamelCase ( a_ : float):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''')
return 3 * pi * radius**2
def _lowerCamelCase ( a_ : float , a_ : float):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''')
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _lowerCamelCase ( a_ : float , a_ : float , a_ : float):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''')
lowerCamelCase :List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _lowerCamelCase ( a_ : float , a_ : float):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''')
return 2 * pi * radius * (height + radius)
def _lowerCamelCase ( a_ : float , a_ : float):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''')
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''')
return 4 * pow(a_ , 2) * torus_radius * tube_radius
def _lowerCamelCase ( a_ : float , a_ : float):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''')
return length * width
def _lowerCamelCase ( a_ : float):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''')
return side_length**2
def _lowerCamelCase ( a_ : float , a_ : float):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''')
return (base * height) / 2
def _lowerCamelCase ( a_ : float , a_ : float , a_ : float):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''')
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''')
lowerCamelCase :str = (sidea + sidea + sidea) / 2
lowerCamelCase :Union[str, Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea))
return area
def _lowerCamelCase ( a_ : float , a_ : float):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''')
return base * height
def _lowerCamelCase ( a_ : float , a_ : float , a_ : float):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''')
return 1 / 2 * (basea + basea) * height
def _lowerCamelCase ( a_ : float):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''')
return pi * radius**2
def _lowerCamelCase ( a_ : float , a_ : float):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''')
return pi * radius_x * radius_y
def _lowerCamelCase ( a_ : float , a_ : float):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''')
return 1 / 2 * diagonal_a * diagonal_a
def _lowerCamelCase ( a_ : int , a_ : float):
if not isinstance(a_ , a_) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''')
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''')
return (sides * length**2) / (4 * tan(pi / sides))
return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print("""\nSurface Areas of various geometric shapes: \n""")
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
| 49
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int] , __snake_case : Tuple , __snake_case : Any=7 , __snake_case : List[str]=3 , __snake_case : int=10 , __snake_case : Tuple=18 , __snake_case : List[Any]=30 , __snake_case : Optional[int]=400 , __snake_case : str=True , __snake_case : List[str]=None , __snake_case : Tuple=True , __snake_case : Union[str, Any]=[0.5, 0.5, 0.5] , __snake_case : Optional[int]=[0.5, 0.5, 0.5] , __snake_case : Optional[int]=None , ):
lowerCamelCase :str = size if size is not None else {'''shortest_edge''': 18}
lowerCamelCase :Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCamelCase :int = parent
lowerCamelCase :Union[str, Any] = batch_size
lowerCamelCase :Union[str, Any] = num_channels
lowerCamelCase :Optional[int] = num_frames
lowerCamelCase :List[str] = image_size
lowerCamelCase :Tuple = min_resolution
lowerCamelCase :Optional[int] = max_resolution
lowerCamelCase :Optional[Any] = do_resize
lowerCamelCase :Optional[int] = size
lowerCamelCase :Optional[Any] = do_normalize
lowerCamelCase :str = image_mean
lowerCamelCase :List[str] = image_std
lowerCamelCase :Tuple = crop_size
def snake_case ( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = VivitImageProcessor if is_vision_available() else None
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = VivitImageProcessingTester(self )
@property
def snake_case ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_center_crop''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
lowerCamelCase :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case ( self : List[str] ):
# Initialize image_processing
lowerCamelCase :str = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowerCamelCase :Tuple = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
lowerCamelCase :List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase :Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case ( self : Dict ):
# Initialize image_processing
lowerCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase :str = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
lowerCamelCase :List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase :int = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case ( self : Tuple ):
# Initialize image_processing
lowerCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase :List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
lowerCamelCase :Dict = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase :Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 49
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
A__ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
A__ = [None] * 10_000_000
A__ = True
A__ = False
def _lowerCamelCase ( a_ : int):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase :List[Any] = chain(next_number(a_))
lowerCamelCase :List[Any] = number_chain
while number < 10_00_00_00:
lowerCamelCase :List[Any] = number_chain
number *= 10
return number_chain
def _lowerCamelCase ( a_ : int = 10_00_00_00):
for i in range(1 , a_):
if CHAINS[i] is None:
chain(i + 1)
return CHAINS[:number].count(a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 49
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCamelCase ( a_ : int):
if not is_accelerate_available():
return method
lowerCamelCase :Any = version.parse(accelerate.__version__).base_version
if version.parse(a_) < version.parse('''0.17.0'''):
return method
def wrapper(self : Optional[Any] , *a_ : List[str] , **a_ : str):
if hasattr(self , '''_hf_hook''') and hasattr(self._hf_hook , '''pre_forward'''):
self._hf_hook.pre_forward(self)
return method(self , *a_ , **a_)
return wrapper
| 49
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowerCAmelCase :
def __init__( self : Optional[Any] , __snake_case : Tuple , __snake_case : Union[str, Any]=13 , __snake_case : int=10 , __snake_case : Optional[Any]=3 , __snake_case : Union[str, Any]=2 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=2 , __snake_case : int=True , __snake_case : Dict=True , __snake_case : Optional[int]=32 , __snake_case : int=5 , __snake_case : List[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : List[str]="gelu" , __snake_case : int=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : str=10 , __snake_case : Dict=0.0_2 , __snake_case : Union[str, Any]=0.9 , __snake_case : Any=None , ):
lowerCamelCase :Dict = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :str = image_size
lowerCamelCase :int = num_channels
lowerCamelCase :Any = patch_size
lowerCamelCase :Tuple = tubelet_size
lowerCamelCase :int = num_frames
lowerCamelCase :Any = is_training
lowerCamelCase :List[str] = use_labels
lowerCamelCase :Dict = hidden_size
lowerCamelCase :int = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Optional[int] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_act
lowerCamelCase :Dict = hidden_dropout_prob
lowerCamelCase :Tuple = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :List[Any] = initializer_range
lowerCamelCase :Any = mask_ratio
lowerCamelCase :Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase :Optional[Any] = (image_size // patch_size) ** 2
lowerCamelCase :List[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase :Optional[Any] = int(mask_ratio * self.seq_length )
def snake_case ( self : Any ):
lowerCamelCase :str = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase :Any = None
if self.use_labels:
lowerCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase :Any = self.get_config()
return config, pixel_values, labels
def snake_case ( self : List[Any] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def snake_case ( self : List[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] ):
lowerCamelCase :str = VideoMAEModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Optional[int] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Tuple , __snake_case : List[Any] , __snake_case : Any , __snake_case : List[Any] ):
lowerCamelCase :Any = VideoMAEForPreTraining(__snake_case )
model.to(__snake_case )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase :int = torch.ones((self.num_masks,) )
lowerCamelCase :Dict = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase :Optional[Any] = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase :int = model(__snake_case , __snake_case )
# model only returns predictions for masked patches
lowerCamelCase :Optional[Any] = mask.sum().item()
lowerCamelCase :Any = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Tuple = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :Optional[Any] = config_and_inputs
lowerCamelCase :Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_UpperCAmelCase = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = VideoMAEModelTester(self )
lowerCamelCase :Tuple = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : int=False ):
lowerCamelCase :Optional[Any] = copy.deepcopy(__snake_case )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase :Optional[int] = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase :Tuple = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase :Optional[int] = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase :Tuple = bool_masked_pos.to(__snake_case )
if return_labels:
if model_class in [
*get_values(__snake_case ),
]:
lowerCamelCase :str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def snake_case ( self : Dict ):
pass
def snake_case ( self : Optional[Any] ):
lowerCamelCase , lowerCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :List[str] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : List[str] ):
lowerCamelCase , lowerCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
lowerCamelCase :Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Any = [*signature.parameters.keys()]
lowerCamelCase :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Dict = VideoMAEModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case ( self : str ):
if not self.has_attentions:
pass
else:
lowerCamelCase , lowerCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :Any = True
for model_class in self.all_model_classes:
lowerCamelCase :Any = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase :List[str] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase :List[str] = True
lowerCamelCase :Any = False
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Any = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Tuple = True
lowerCamelCase :Dict = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :List[Any] = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Any = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :List[str] = True
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Optional[int] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
lowerCamelCase :Any = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
lowerCamelCase :List[str] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :List[Any] = outputs.hidden_states
lowerCamelCase :Optional[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) , __snake_case )
lowerCamelCase :Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase :Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Dict = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Optional[Any] ):
pass
def _lowerCamelCase ( ):
lowerCamelCase :List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''')
lowerCamelCase :List[str] = np.load(a_)
return list(a_)
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[str] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :int = prepare_video()
lowerCamelCase :Dict = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :List[str] = model(**__snake_case )
# verify the logits
lowerCamelCase :Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :List[str] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
@slow
def snake_case ( self : Dict ):
lowerCamelCase :str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__snake_case )
lowerCamelCase :str = self.default_image_processor
lowerCamelCase :Any = prepare_video()
lowerCamelCase :List[Any] = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
# add boolean mask, indicating which patches to mask
lowerCamelCase :int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase :Optional[Any] = torch.load(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Union[str, Any] = model(**__snake_case )
# verify the logits
lowerCamelCase :List[Any] = torch.Size([1, 1408, 1536] )
lowerCamelCase :List[str] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__snake_case )
self.assertEqual(outputs.logits.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase :str = torch.tensor([0.5_1_4_2] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase :List[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=__snake_case ).to(
__snake_case )
with torch.no_grad():
lowerCamelCase :Optional[int] = model(**__snake_case )
lowerCamelCase :Any = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1e-4 ) )
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ = logging.get_logger(__name__)
A__ = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'conditional_detr'
_UpperCAmelCase = ['past_key_values']
_UpperCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Union[str, Any] , __snake_case : Any=True , __snake_case : Tuple=None , __snake_case : Union[str, Any]=3 , __snake_case : Dict=300 , __snake_case : Tuple=6 , __snake_case : Union[str, Any]=2048 , __snake_case : List[Any]=8 , __snake_case : List[str]=6 , __snake_case : int=2048 , __snake_case : Any=8 , __snake_case : Any=0.0 , __snake_case : Any=0.0 , __snake_case : List[str]=True , __snake_case : Dict="relu" , __snake_case : int=256 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.0 , __snake_case : Tuple=0.0 , __snake_case : Any=0.0_2 , __snake_case : Union[str, Any]=1.0 , __snake_case : List[Any]=False , __snake_case : Optional[Any]="sine" , __snake_case : List[Any]="resnet50" , __snake_case : str=True , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=2 , __snake_case : Optional[Any]=5 , __snake_case : int=2 , __snake_case : int=1 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=5 , __snake_case : Any=2 , __snake_case : Optional[Any]=0.2_5 , **__snake_case : List[Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCamelCase :Any = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__snake_case , __snake_case ):
lowerCamelCase :Dict = backbone_config.get('''model_type''' )
lowerCamelCase :Optional[int] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase :Any = config_class.from_dict(__snake_case )
lowerCamelCase :Union[str, Any] = use_timm_backbone
lowerCamelCase :List[str] = backbone_config
lowerCamelCase :Optional[Any] = num_channels
lowerCamelCase :Any = num_queries
lowerCamelCase :Any = d_model
lowerCamelCase :Dict = encoder_ffn_dim
lowerCamelCase :Optional[Any] = encoder_layers
lowerCamelCase :Optional[int] = encoder_attention_heads
lowerCamelCase :Union[str, Any] = decoder_ffn_dim
lowerCamelCase :Dict = decoder_layers
lowerCamelCase :List[str] = decoder_attention_heads
lowerCamelCase :List[Any] = dropout
lowerCamelCase :Tuple = attention_dropout
lowerCamelCase :List[str] = activation_dropout
lowerCamelCase :List[str] = activation_function
lowerCamelCase :Optional[int] = init_std
lowerCamelCase :int = init_xavier_std
lowerCamelCase :List[str] = encoder_layerdrop
lowerCamelCase :List[Any] = decoder_layerdrop
lowerCamelCase :Any = encoder_layers
lowerCamelCase :Optional[int] = auxiliary_loss
lowerCamelCase :Any = position_embedding_type
lowerCamelCase :str = backbone
lowerCamelCase :Optional[int] = use_pretrained_backbone
lowerCamelCase :Any = dilation
# Hungarian matcher
lowerCamelCase :Dict = class_cost
lowerCamelCase :int = bbox_cost
lowerCamelCase :Tuple = giou_cost
# Loss coefficients
lowerCamelCase :int = mask_loss_coefficient
lowerCamelCase :Tuple = dice_loss_coefficient
lowerCamelCase :Optional[int] = cls_loss_coefficient
lowerCamelCase :Tuple = bbox_loss_coefficient
lowerCamelCase :Union[str, Any] = giou_loss_coefficient
lowerCamelCase :int = focal_alpha
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
def snake_case ( self : List[str] ):
return self.encoder_attention_heads
@property
def snake_case ( self : int ):
return self.d_model
def snake_case ( self : List[Any] ):
lowerCamelCase :Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase :Any = self.backbone_config.to_dict()
lowerCamelCase :List[Any] = self.__class__.model_type
return output
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = version.parse('1.11' )
@property
def snake_case ( self : List[str] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def snake_case ( self : Any ):
return 1e-5
@property
def snake_case ( self : Any ):
return 12
| 49
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 1
|
import re
def _lowerCamelCase ( a_ : str):
lowerCamelCase :List[Any] = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
if match := re.search(a_ , a_):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
| 49
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCamelCase ( a_ : Any , a_ : Any):
assert isinstance(a_ , a_)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True])
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Tuple , a_ : List[Any]):
lowerCamelCase :Dict = tmp_path / '''cache'''
lowerCamelCase :Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase :List[Any] = JsonDatasetReader(a_ , cache_dir=a_ , keep_in_memory=a_).read()
_check_json_dataset(a_ , a_)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _lowerCamelCase ( a_ : Dict , a_ : Union[str, Any] , a_ : int):
lowerCamelCase :str = tmp_path / '''cache'''
lowerCamelCase :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase :int = features.copy() if features else default_expected_features
lowerCamelCase :Dict = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
lowerCamelCase :Optional[Any] = JsonDatasetReader(a_ , features=a_ , cache_dir=a_).read()
_check_json_dataset(a_ , a_)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def _lowerCamelCase ( a_ : Optional[int] , a_ : int , a_ : str):
lowerCamelCase :Union[str, Any] = tmp_path / '''cache'''
lowerCamelCase :List[Any] = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
lowerCamelCase :str = features.copy() if features else default_expected_features
lowerCamelCase :Optional[int] = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
lowerCamelCase :int = JsonDatasetReader(a_ , features=a_ , cache_dir=a_).read()
assert isinstance(a_ , a_)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _lowerCamelCase ( a_ : Optional[int] , a_ : int):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowerCamelCase :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
lowerCamelCase :int = features.copy()
lowerCamelCase :Dict = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
lowerCamelCase :List[str] = tmp_path / '''cache'''
lowerCamelCase :List[str] = JsonDatasetReader(a_ , features=a_ , cache_dir=a_).read()
assert isinstance(a_ , a_)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train'''), '''train''', '''test'''])
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : List[Any] , a_ : List[str]):
lowerCamelCase :Any = tmp_path / '''cache'''
lowerCamelCase :Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase :Any = JsonDatasetReader(a_ , cache_dir=a_ , split=a_).read()
_check_json_dataset(a_ , a_)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list])
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Tuple):
if issubclass(a_ , a_):
lowerCamelCase :str = jsonl_path
elif issubclass(a_ , a_):
lowerCamelCase :List[Any] = [jsonl_path]
lowerCamelCase :Any = tmp_path / '''cache'''
lowerCamelCase :List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase :Dict = JsonDatasetReader(a_ , cache_dir=a_).read()
_check_json_dataset(a_ , a_)
def _lowerCamelCase ( a_ : str , a_ : Dict , a_ : Any=("train",)):
assert isinstance(a_ , a_)
for split in splits:
lowerCamelCase :Tuple = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True])
def _lowerCamelCase ( a_ : int , a_ : List[Any] , a_ : Dict):
lowerCamelCase :Dict = tmp_path / '''cache'''
lowerCamelCase :int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase :Union[str, Any] = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=a_ , keep_in_memory=a_).read()
_check_json_datasetdict(a_ , a_)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _lowerCamelCase ( a_ : Any , a_ : Union[str, Any] , a_ : int):
lowerCamelCase :List[Any] = tmp_path / '''cache'''
lowerCamelCase :List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase :List[str] = features.copy() if features else default_expected_features
lowerCamelCase :Any = (
Features({feature: Value(a_) for feature, dtype in features.items()}) if features is not None else None
)
lowerCamelCase :List[str] = JsonDatasetReader({'''train''': jsonl_path} , features=a_ , cache_dir=a_).read()
_check_json_datasetdict(a_ , a_)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train'''), '''train''', '''test'''])
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Any , a_ : str):
if split:
lowerCamelCase :Any = {split: jsonl_path}
else:
lowerCamelCase :Optional[Any] = '''train'''
lowerCamelCase :List[str] = {'''train''': jsonl_path, '''test''': jsonl_path}
lowerCamelCase :List[Any] = tmp_path / '''cache'''
lowerCamelCase :Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCamelCase :Dict = JsonDatasetReader(a_ , cache_dir=a_).read()
_check_json_datasetdict(a_ , a_ , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def _lowerCamelCase ( a_ : int):
return json.load(a_)
def _lowerCamelCase ( a_ : Union[str, Any]):
return [json.loads(a_) for line in buffer]
class _lowerCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case ( self : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : str ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
lowerCamelCase :Optional[int] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case ( self : Optional[Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
lowerCamelCase :str = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case ( self : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[str] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase :Any = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case ( self : Any , __snake_case : int , __snake_case : List[str] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase :Tuple = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def snake_case ( self : Optional[Any] , __snake_case : Dict ):
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def snake_case ( self : List[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[Any] ):
lowerCamelCase :Any = tmp_path_factory.mktemp('''data''' ) / F"test.json.{extension}"
lowerCamelCase :int = str(shared_datadir / F"test_file.json.{extension}" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f:
lowerCamelCase :List[Any] = f.read()
with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f:
lowerCamelCase :int = f.read()
assert exported_content == original_content
| 49
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = """▁"""
A__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
A__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
A__ = {
"""facebook/s2t-small-librispeech-asr""": 1_024,
}
A__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
A__ = {"""mustc""": MUSTC_LANGS}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = MAX_MODEL_INPUT_SIZES
_UpperCAmelCase = ['input_ids', 'attention_mask']
_UpperCAmelCase = []
def __init__( self : Tuple , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Tuple="<s>" , __snake_case : Dict="</s>" , __snake_case : List[str]="<pad>" , __snake_case : Optional[Any]="<unk>" , __snake_case : Optional[Any]=False , __snake_case : Tuple=False , __snake_case : int=None , __snake_case : List[str]=None , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : str , ):
lowerCamelCase :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , do_upper_case=__snake_case , do_lower_case=__snake_case , tgt_lang=__snake_case , lang_codes=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
lowerCamelCase :Optional[int] = do_upper_case
lowerCamelCase :int = do_lower_case
lowerCamelCase :Dict = load_json(__snake_case )
lowerCamelCase :Tuple = {v: k for k, v in self.encoder.items()}
lowerCamelCase :List[Any] = spm_file
lowerCamelCase :Union[str, Any] = load_spm(__snake_case , self.sp_model_kwargs )
if lang_codes is not None:
lowerCamelCase :List[str] = lang_codes
lowerCamelCase :int = LANGUAGES[lang_codes]
lowerCamelCase :Tuple = [F"<lang:{lang}>" for lang in self.langs]
lowerCamelCase :Optional[int] = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
lowerCamelCase :str = self.lang_tokens
lowerCamelCase :List[Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCamelCase :int = {}
@property
def snake_case ( self : Optional[Any] ):
return len(self.encoder )
@property
def snake_case ( self : int ):
return self._tgt_lang
@tgt_lang.setter
def snake_case ( self : List[str] , __snake_case : Union[str, Any] ):
lowerCamelCase :Dict = new_tgt_lang
self.set_tgt_lang_special_tokens(__snake_case )
def snake_case ( self : Tuple , __snake_case : str ):
lowerCamelCase :Any = self.lang_code_to_id[tgt_lang]
lowerCamelCase :List[str] = [lang_code_id]
def snake_case ( self : Any , __snake_case : str ):
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def snake_case ( self : Tuple , __snake_case : int ):
return self.encoder.get(__snake_case , self.encoder[self.unk_token] )
def snake_case ( self : Dict , __snake_case : int ):
return self.decoder.get(__snake_case , self.unk_token )
def snake_case ( self : Optional[int] , __snake_case : List[str] ):
lowerCamelCase :Any = []
lowerCamelCase :int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCamelCase :int = self.sp_model.decode(__snake_case )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCamelCase :Any = []
else:
current_sub_tokens.append(__snake_case )
lowerCamelCase :int = self.sp_model.decode(__snake_case )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def snake_case ( self : int , __snake_case : Tuple , __snake_case : Any=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self : Any , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
lowerCamelCase :List[str] = [1] * len(self.prefix_tokens )
lowerCamelCase :Union[str, Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
lowerCamelCase :Any = self.__dict__.copy()
lowerCamelCase :str = None
return state
def __setstate__( self : Any , __snake_case : Dict ):
lowerCamelCase :int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = load_spm(self.spm_file , self.sp_model_kwargs )
def snake_case ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ):
lowerCamelCase :int = Path(__snake_case )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
lowerCamelCase :List[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowerCamelCase :Optional[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __snake_case )
if os.path.abspath(self.spm_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __snake_case )
elif not os.path.isfile(self.spm_file ):
with open(__snake_case , '''wb''' ) as fi:
lowerCamelCase :List[str] = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (str(__snake_case ), str(__snake_case ))
def _lowerCamelCase ( a_ : str , a_ : Dict[str, Any]):
lowerCamelCase :Tuple = sentencepiece.SentencePieceProcessor(**a_)
spm.Load(str(a_))
return spm
def _lowerCamelCase ( a_ : str):
with open(a_ , '''r''') as f:
return json.load(a_)
def _lowerCamelCase ( a_ : List[str] , a_ : str):
with open(a_ , '''w''') as f:
json.dump(a_ , a_ , indent=2)
| 49
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
| 49
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'vit_msn'
def __init__( self : List[str] , __snake_case : Union[str, Any]=768 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : Any=3072 , __snake_case : Any="gelu" , __snake_case : int=0.0 , __snake_case : str=0.0 , __snake_case : List[Any]=0.0_2 , __snake_case : int=1e-0_6 , __snake_case : Any=224 , __snake_case : Dict=16 , __snake_case : Dict=3 , __snake_case : Union[str, Any]=True , **__snake_case : Union[str, Any] , ):
super().__init__(**__snake_case )
lowerCamelCase :Optional[int] = hidden_size
lowerCamelCase :int = num_hidden_layers
lowerCamelCase :int = num_attention_heads
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Tuple = hidden_act
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[str] = initializer_range
lowerCamelCase :Tuple = layer_norm_eps
lowerCamelCase :Tuple = image_size
lowerCamelCase :List[str] = patch_size
lowerCamelCase :Dict = num_channels
lowerCamelCase :Union[str, Any] = qkv_bias
| 49
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 1
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : Union[tf.Tensor, np.ndarray]):
if isinstance(a_ , np.ndarray):
return list(tensor.shape)
lowerCamelCase :int = tf.shape(a_)
if tensor.shape == tf.TensorShape(a_):
return dynamic
lowerCamelCase :Union[str, Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(a_)]
def _lowerCamelCase ( a_ : tf.Tensor , a_ : Optional[int] = None , a_ : Optional[str] = None):
return tf.nn.softmax(logits=logits + 1e-9 , axis=a_ , name=a_)
def _lowerCamelCase ( a_ : Dict , a_ : Optional[int] , a_ : Union[str, Any] , a_ : str=1e-5 , a_ : List[Any]=-1):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(a_ , a_):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''')
# Get mean and variance on the axis to be normalized
lowerCamelCase , lowerCamelCase :Any = tf.nn.moments(a_ , axes=[axis] , keepdims=a_)
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowerCamelCase :Dict = [1] * inputs.shape.rank
lowerCamelCase :Tuple = shape_list(a_)[axis]
lowerCamelCase :List[Any] = tf.reshape(a_ , a_)
lowerCamelCase :str = tf.reshape(a_ , a_)
# Compute layer normalization using the batch_normalization
# function.
lowerCamelCase :Dict = tf.nn.batch_normalization(
a_ , a_ , a_ , offset=a_ , scale=a_ , variance_epsilon=a_ , )
return outputs
def _lowerCamelCase ( a_ : Dict , a_ : List[str]=0 , a_ : Dict=-1):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowerCamelCase :str = tf.shape(a_)
lowerCamelCase :int = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
lowerCamelCase :int = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0)
return tf.reshape(a_ , a_)
def _lowerCamelCase ( a_ : tf.Tensor):
if not isinstance(a_ , tf.Tensor):
lowerCamelCase :Optional[int] = tf.convert_to_tensor(a_) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowerCamelCase :int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowerCamelCase :List[Any] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowerCamelCase :Tuple = (
tf.cast(1 , encoder_attention_mask.dtype) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _lowerCamelCase ( a_ : tf.Tensor , a_ : int , a_ : str = "input_ids"):
tf.debugging.assert_less(
a_ , tf.cast(a_ , dtype=tensor.dtype) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(a_)}) must be smaller than the embedding "
F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def _lowerCamelCase ( a_ : int , a_ : Dict , a_ : Any):
lowerCamelCase :Optional[Any] = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowerCamelCase :str = [x for x in data if len(a_) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}")
lowerCamelCase :Optional[int] = np.asarray(a_)
lowerCamelCase :Union[str, Any] = 1
lowerCamelCase :Tuple = np.array_split(a_ , a_)
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
num_chunks += 1
lowerCamelCase :List[str] = np.array_split(a_ , a_)
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(a_):
lowerCamelCase :Any = chunk_data
else:
lowerCamelCase :List[Any] = data
def _lowerCamelCase ( a_ : str , a_ : List[Any]):
if name in group.attrs:
lowerCamelCase :Union[str, Any] = [n.decode('''utf8''') if hasattr(a_ , '''decode''') else n for n in group.attrs[name]]
else:
lowerCamelCase :str = []
lowerCamelCase :List[Any] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''') if hasattr(a_ , '''decode''') else n for n in group.attrs['''%s%d''' % (name, chunk_id)]])
chunk_id += 1
return data
def _lowerCamelCase ( a_ : Optional[Any]):
def _expand_single_ad_tensor(a_ : Any):
if isinstance(a_ , tf.Tensor) and t.shape.rank == 1:
return tf.expand_dims(a_ , axis=-1)
return t
return tf.nest.map_structure(_expand_single_ad_tensor , a_)
| 49
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'autoformer'
_UpperCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[int] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "student_t" , __snake_case : str = "nll" , __snake_case : int = 1 , __snake_case : List[int] = [1, 2, 3, 4, 5, 6, 7] , __snake_case : bool = True , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : Optional[List[int]] = None , __snake_case : Optional[List[int]] = None , __snake_case : int = 64 , __snake_case : int = 2 , __snake_case : int = 2 , __snake_case : int = 2 , __snake_case : int = 2 , __snake_case : int = 32 , __snake_case : int = 32 , __snake_case : str = "gelu" , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : int = 100 , __snake_case : float = 0.0_2 , __snake_case : bool = True , __snake_case : List[Any]=True , __snake_case : int = 10 , __snake_case : int = 25 , __snake_case : int = 3 , **__snake_case : Optional[int] , ):
# time series specific configuration
lowerCamelCase :List[str] = prediction_length
lowerCamelCase :Tuple = context_length if context_length is not None else prediction_length
lowerCamelCase :Union[str, Any] = distribution_output
lowerCamelCase :Tuple = loss
lowerCamelCase :Optional[Any] = input_size
lowerCamelCase :Any = num_time_features
lowerCamelCase :Dict = lags_sequence
lowerCamelCase :Optional[int] = scaling
lowerCamelCase :Dict = num_dynamic_real_features
lowerCamelCase :Union[str, Any] = num_static_real_features
lowerCamelCase :List[str] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowerCamelCase :Dict = cardinality
else:
lowerCamelCase :Optional[Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowerCamelCase :Optional[int] = embedding_dimension
else:
lowerCamelCase :Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase :Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase :Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase :Tuple = d_model
lowerCamelCase :Dict = encoder_attention_heads
lowerCamelCase :Union[str, Any] = decoder_attention_heads
lowerCamelCase :Dict = encoder_ffn_dim
lowerCamelCase :int = decoder_ffn_dim
lowerCamelCase :List[str] = encoder_layers
lowerCamelCase :int = decoder_layers
lowerCamelCase :Optional[Any] = dropout
lowerCamelCase :str = attention_dropout
lowerCamelCase :Optional[int] = activation_dropout
lowerCamelCase :List[str] = encoder_layerdrop
lowerCamelCase :List[str] = decoder_layerdrop
lowerCamelCase :List[Any] = activation_function
lowerCamelCase :Optional[int] = init_std
lowerCamelCase :str = use_cache
# Autoformer
lowerCamelCase :int = label_length
lowerCamelCase :List[str] = moving_average
lowerCamelCase :List[str] = autocorrelation_factor
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
def snake_case ( self : List[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 49
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 1
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {"""vocab_file""": """sentencepiece.model"""}
A__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
A__ = {
"""google/rembert""": 256,
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str , __snake_case : Any , __snake_case : int=False , __snake_case : Optional[int]=True , __snake_case : List[Any]=True , __snake_case : Any="[CLS]" , __snake_case : Optional[int]="[SEP]" , __snake_case : int="[UNK]" , __snake_case : Optional[int]="[SEP]" , __snake_case : Any="[PAD]" , __snake_case : int="[CLS]" , __snake_case : Any="[MASK]" , **__snake_case : int , ):
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
lowerCamelCase :Dict = do_lower_case
lowerCamelCase :Union[str, Any] = remove_space
lowerCamelCase :Tuple = keep_accents
lowerCamelCase :Optional[Any] = vocab_file
lowerCamelCase :Any = spm.SentencePieceProcessor()
self.sp_model.Load(__snake_case )
@property
def snake_case ( self : Dict ):
return len(self.sp_model )
def snake_case ( self : List[str] ):
lowerCamelCase :str = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
lowerCamelCase :Optional[int] = self.__dict__.copy()
lowerCamelCase :Optional[int] = None
return state
def __setstate__( self : List[str] , __snake_case : Optional[Any] ):
lowerCamelCase :Optional[Any] = d
lowerCamelCase :Any = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def snake_case ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any]=False ):
lowerCamelCase :Dict = self.sp_model.EncodeAsPieces(__snake_case )
return pieces
def snake_case ( self : str , __snake_case : int ):
return self.sp_model.PieceToId(__snake_case )
def snake_case ( self : List[Any] , __snake_case : Union[str, Any] ):
return self.sp_model.IdToPiece(__snake_case )
def snake_case ( self : List[str] , __snake_case : List[str] ):
lowerCamelCase :Optional[int] = self.sp_model.decode_pieces(__snake_case )
return out_string
def snake_case ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
lowerCamelCase :List[str] = [self.sep_token_id]
lowerCamelCase :str = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case ( self : int , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1]
def snake_case ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
lowerCamelCase :int = [self.sep_token_id]
lowerCamelCase :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self : str , __snake_case : str , __snake_case : Optional[str] = None ):
if not os.path.isdir(__snake_case ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__snake_case ) )
return
lowerCamelCase :Dict = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 49
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 1
|
from collections import deque
from math import floor
from random import random
from time import time
class _lowerCAmelCase :
def __init__( self : Any ):
lowerCamelCase :Union[str, Any] = {}
def snake_case ( self : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str=1 ):
if self.graph.get(__snake_case ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase :Dict = [[w, v]]
if not self.graph.get(__snake_case ):
lowerCamelCase :List[Any] = []
def snake_case ( self : int ):
return list(self.graph )
def snake_case ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
def snake_case ( self : Optional[int] , __snake_case : Tuple=-2 , __snake_case : int=-1 ):
if s == d:
return []
lowerCamelCase :int = []
lowerCamelCase :Tuple = []
if s == -2:
lowerCamelCase :str = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
lowerCamelCase :List[str] = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Optional[Any] = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return visited
def snake_case ( self : Union[str, Any] , __snake_case : List[Any]=-1 ):
if c == -1:
lowerCamelCase :str = floor(random() * 10000 ) + 10
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase :Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case , __snake_case , 1 )
def snake_case ( self : Optional[Any] , __snake_case : str=-2 ):
lowerCamelCase :List[str] = deque()
lowerCamelCase :List[str] = []
if s == -2:
lowerCamelCase :Any = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
lowerCamelCase :Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case ( self : str , __snake_case : str ):
lowerCamelCase :Union[str, Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def snake_case ( self : int , __snake_case : List[str] ):
return len(self.graph[u] )
def snake_case ( self : str , __snake_case : List[Any]=-2 ):
lowerCamelCase :List[Any] = []
lowerCamelCase :Dict = []
if s == -2:
lowerCamelCase :Union[str, Any] = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Optional[int] = s
lowerCamelCase :List[Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__snake_case ) != 0:
lowerCamelCase :Optional[int] = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Union[str, Any] = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return sorted_nodes
def snake_case ( self : int ):
lowerCamelCase :Any = []
lowerCamelCase :Optional[int] = []
lowerCamelCase :Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :int = -2
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :str = s
lowerCamelCase :str = False
lowerCamelCase :int = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :str = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :Tuple = True
if len(__snake_case ) != 0:
lowerCamelCase :int = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Tuple = False
indirect_parents.append(__snake_case )
lowerCamelCase :Tuple = s
lowerCamelCase :str = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :str = []
lowerCamelCase :List[str] = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :List[str] = -2
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Optional[Any] = s
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :List[Any] = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :Tuple = True
if len(__snake_case ) != 0:
lowerCamelCase :int = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Tuple = False
indirect_parents.append(__snake_case )
lowerCamelCase :int = s
lowerCamelCase :Tuple = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return False
def snake_case ( self : List[Any] , __snake_case : List[str]=-2 , __snake_case : Optional[Any]=-1 ):
lowerCamelCase :Union[str, Any] = time()
self.dfs(__snake_case , __snake_case )
lowerCamelCase :Optional[Any] = time()
return end - begin
def snake_case ( self : int , __snake_case : int=-2 ):
lowerCamelCase :List[Any] = time()
self.bfs(__snake_case )
lowerCamelCase :Tuple = time()
return end - begin
class _lowerCAmelCase :
def __init__( self : Union[str, Any] ):
lowerCamelCase :Any = {}
def snake_case ( self : Union[str, Any] , __snake_case : int , __snake_case : Dict , __snake_case : List[str]=1 ):
# check if the u exists
if self.graph.get(__snake_case ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase :int = [[w, v]]
# add the other way
if self.graph.get(__snake_case ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowerCamelCase :str = [[w, u]]
def snake_case ( self : Tuple , __snake_case : Optional[int] , __snake_case : Tuple ):
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
# the other way round
if self.graph.get(__snake_case ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__snake_case )
def snake_case ( self : int , __snake_case : int=-2 , __snake_case : List[Any]=-1 ):
if s == d:
return []
lowerCamelCase :int = []
lowerCamelCase :str = []
if s == -2:
lowerCamelCase :Dict = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Optional[int] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
lowerCamelCase :Optional[int] = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Any = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return visited
def snake_case ( self : Dict , __snake_case : List[str]=-1 ):
if c == -1:
lowerCamelCase :str = floor(random() * 10000 ) + 10
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase :Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case , __snake_case , 1 )
def snake_case ( self : int , __snake_case : str=-2 ):
lowerCamelCase :Optional[int] = deque()
lowerCamelCase :List[Any] = []
if s == -2:
lowerCamelCase :Union[str, Any] = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
lowerCamelCase :str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case ( self : int , __snake_case : Any ):
return len(self.graph[u] )
def snake_case ( self : Tuple ):
lowerCamelCase :Optional[int] = []
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Optional[Any] = -2
lowerCamelCase :Optional[int] = []
lowerCamelCase :int = s
lowerCamelCase :str = False
lowerCamelCase :List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :Tuple = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :str = True
if len(__snake_case ) != 0:
lowerCamelCase :Dict = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Tuple = False
indirect_parents.append(__snake_case )
lowerCamelCase :Union[str, Any] = s
lowerCamelCase :Any = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def snake_case ( self : Any ):
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Any = -2
lowerCamelCase :Dict = []
lowerCamelCase :Dict = s
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :int = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :Optional[Any] = True
if len(__snake_case ) != 0:
lowerCamelCase :Any = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :List[Any] = False
indirect_parents.append(__snake_case )
lowerCamelCase :Any = s
lowerCamelCase :Any = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return False
def snake_case ( self : List[Any] ):
return list(self.graph )
def snake_case ( self : Optional[int] , __snake_case : Dict=-2 , __snake_case : int=-1 ):
lowerCamelCase :str = time()
self.dfs(__snake_case , __snake_case )
lowerCamelCase :Tuple = time()
return end - begin
def snake_case ( self : List[str] , __snake_case : Union[str, Any]=-2 ):
lowerCamelCase :List[Any] = time()
self.bfs(__snake_case )
lowerCamelCase :Tuple = time()
return end - begin
| 49
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 1
|
from __future__ import annotations
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : int ):
lowerCamelCase :Tuple = order
# a_{0} ... a_{k}
lowerCamelCase :Dict = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowerCamelCase :Optional[int] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowerCamelCase :Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
lowerCamelCase :Any = [0.0] * self.order
def snake_case ( self : List[str] , __snake_case : list[float] , __snake_case : list[float] ):
if len(__snake_case ) < self.order:
lowerCamelCase :int = [1.0, *a_coeffs]
if len(__snake_case ) != self.order + 1:
lowerCamelCase :str = (
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(__snake_case )}"
)
raise ValueError(__snake_case )
if len(__snake_case ) != self.order + 1:
lowerCamelCase :str = (
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(__snake_case )}"
)
raise ValueError(__snake_case )
lowerCamelCase :Optional[int] = a_coeffs
lowerCamelCase :Tuple = b_coeffs
def snake_case ( self : Tuple , __snake_case : float ):
lowerCamelCase :int = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowerCamelCase :Union[str, Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowerCamelCase :Optional[Any] = self.input_history[:-1]
lowerCamelCase :Union[str, Any] = self.output_history[:-1]
lowerCamelCase :int = sample
lowerCamelCase :int = result
return result
| 49
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 1
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
# to overwrite at feature extractactor specific tests
_UpperCAmelCase = None
_UpperCAmelCase = None
@property
def snake_case ( self : Any ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__snake_case , '''feature_size''' ) )
self.assertTrue(hasattr(__snake_case , '''sampling_rate''' ) )
self.assertTrue(hasattr(__snake_case , '''padding_value''' ) )
def snake_case ( self : Dict ):
lowerCamelCase :int = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase :Any = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :str = feat_extract.model_input_names[0]
lowerCamelCase :Optional[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__snake_case ) == len(__snake_case ) for x, y in zip(__snake_case , processed_features[input_name] ) ) )
lowerCamelCase :Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
lowerCamelCase :List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowerCamelCase :Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase :int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
lowerCamelCase :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :List[str] = feat_extract.model_input_names[0]
lowerCamelCase :Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowerCamelCase :Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase :List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case )
lowerCamelCase :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :Tuple = feat_extract.model_input_names[0]
lowerCamelCase :str = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
lowerCamelCase :List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase :Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self : Dict , __snake_case : Tuple=False ):
def _inputs_have_equal_length(__snake_case : int ):
lowerCamelCase :Any = len(input[0] )
for input_slice in input[1:]:
if len(__snake_case ) != length:
return False
return True
def _inputs_are_equal(__snake_case : Tuple , __snake_case : Tuple ):
if len(__snake_case ) != len(__snake_case ):
return False
for input_slice_a, input_slice_a in zip(__snake_case , __snake_case ):
if not np.allclose(np.asarray(__snake_case ) , np.asarray(__snake_case ) , atol=1e-3 ):
return False
return True
lowerCamelCase :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case )
lowerCamelCase :Any = feat_extract.model_input_names[0]
lowerCamelCase :Tuple = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :List[Any] = self.feat_extract_tester.seq_length_diff
lowerCamelCase :List[str] = self.feat_extract_tester.max_seq_length + pad_diff
lowerCamelCase :List[str] = self.feat_extract_tester.min_seq_length
lowerCamelCase :Any = self.feat_extract_tester.batch_size
lowerCamelCase :List[Any] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCamelCase :Optional[int] = feat_extract.pad(__snake_case , padding=__snake_case )
lowerCamelCase :List[str] = input_a[input_name]
lowerCamelCase :Union[str, Any] = feat_extract.pad(__snake_case , padding='''longest''' )
lowerCamelCase :Dict = input_a[input_name]
lowerCamelCase :List[str] = feat_extract.pad(__snake_case , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
lowerCamelCase :List[Any] = input_a[input_name]
lowerCamelCase :Tuple = feat_extract.pad(__snake_case , padding='''longest''' , return_tensors='''np''' )
lowerCamelCase :Optional[Any] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='''max_length''' )[input_name]
lowerCamelCase :Any = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=__snake_case , return_tensors='''np''' )
lowerCamelCase :List[str] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase :Tuple = feat_extract.pad(__snake_case , pad_to_multiple_of=10 )
lowerCamelCase :Union[str, Any] = input_a[input_name]
lowerCamelCase :Dict = feat_extract.pad(__snake_case , padding='''longest''' , pad_to_multiple_of=10 )
lowerCamelCase :Optional[int] = input_a[input_name]
lowerCamelCase :Tuple = feat_extract.pad(
__snake_case , padding='''max_length''' , pad_to_multiple_of=10 , max_length=__snake_case )
lowerCamelCase :Optional[Any] = input_a[input_name]
lowerCamelCase :Tuple = feat_extract.pad(
__snake_case , padding='''max_length''' , pad_to_multiple_of=10 , max_length=__snake_case , return_tensors='''np''' , )
lowerCamelCase :str = input_a[input_name]
self.assertTrue(all(len(__snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCamelCase :str = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def snake_case ( self : Dict , __snake_case : Union[str, Any]=False ):
def _inputs_have_equal_length(__snake_case : List[str] ):
lowerCamelCase :str = len(input[0] )
for input_slice in input[1:]:
if len(__snake_case ) != length:
return False
return True
def _inputs_are_equal(__snake_case : Dict , __snake_case : List[str] ):
if len(__snake_case ) != len(__snake_case ):
return False
for input_slice_a, input_slice_a in zip(__snake_case , __snake_case ):
if not np.allclose(np.asarray(__snake_case ) , np.asarray(__snake_case ) , atol=1e-3 ):
return False
return True
lowerCamelCase :str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case )
lowerCamelCase :Tuple = feat_extract.model_input_names[0]
lowerCamelCase :Optional[int] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCamelCase :List[str] = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=__snake_case )
lowerCamelCase :Optional[Any] = input_a[input_name]
lowerCamelCase :List[Any] = feat_extract.pad(__snake_case , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
lowerCamelCase :Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
# truncate to smallest with np
lowerCamelCase :Optional[Any] = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=__snake_case , )
lowerCamelCase :int = input_a[input_name]
lowerCamelCase :List[Any] = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
lowerCamelCase :Optional[Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
# truncate to middle
lowerCamelCase :Any = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=__snake_case , return_tensors='''np''' , )
lowerCamelCase :Optional[int] = input_a[input_name]
lowerCamelCase :Optional[int] = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=__snake_case )
lowerCamelCase :Dict = input_a[input_name]
lowerCamelCase :Optional[int] = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
lowerCamelCase :List[Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , truncation=__snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='''longest''' , truncation=__snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='''longest''' , truncation=__snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__snake_case ):
feat_extract.pad(__snake_case , padding='''max_length''' , truncation=__snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase :Any = 12
lowerCamelCase :Dict = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
lowerCamelCase :List[str] = input_a[input_name]
lowerCamelCase :Dict = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__snake_case , )
lowerCamelCase :List[str] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCamelCase :List[str] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCamelCase :List[Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__snake_case ) )
self.assertFalse(_inputs_have_equal_length(__snake_case ) )
def snake_case ( self : Union[str, Any] ):
self._check_padding(numpify=__snake_case )
def snake_case ( self : int ):
self._check_padding(numpify=__snake_case )
def snake_case ( self : Union[str, Any] ):
self._check_truncation(numpify=__snake_case )
def snake_case ( self : str ):
self._check_truncation(numpify=__snake_case )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase :Dict = feat_extract.model_input_names[0]
lowerCamelCase :Any = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :Optional[Any] = feat_extract.pad(__snake_case , padding='''longest''' , return_tensors='''np''' )[input_name]
lowerCamelCase :Dict = feat_extract.pad(__snake_case , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase :Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase :Tuple = feat_extract.model_input_names[0]
lowerCamelCase :int = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :str = feat_extract.pad(__snake_case , padding='''longest''' , return_tensors='''np''' )[input_name]
lowerCamelCase :List[Any] = feat_extract.pad(__snake_case , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case ( self : int ):
lowerCamelCase :Union[str, Any] = self.feat_extract_dict
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :int = self.feature_extraction_class(**__snake_case )
lowerCamelCase :Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase :Tuple = [len(__snake_case ) for x in speech_inputs]
lowerCamelCase :Optional[int] = feat_extract.model_input_names[0]
lowerCamelCase :int = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :int = feat_extract.pad(__snake_case , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , __snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __snake_case )
def snake_case ( self : Any ):
lowerCamelCase :str = self.feat_extract_dict
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = self.feature_extraction_class(**__snake_case )
lowerCamelCase :List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase :Optional[int] = [len(__snake_case ) for x in speech_inputs]
lowerCamelCase :List[Any] = feat_extract.model_input_names[0]
lowerCamelCase :str = BatchFeature({input_name: speech_inputs} )
lowerCamelCase :str = min(__snake_case )
lowerCamelCase :List[str] = feat_extract.pad(
__snake_case , padding='''max_length''' , max_length=__snake_case , truncation=__snake_case , return_tensors='''np''' )
self.assertIn('''attention_mask''' , __snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 49
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""MaskFormerFeatureExtractor"""]
A__ = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
A__ = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 49
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
| 49
| 1
|
import numpy as np
from PIL import Image
def _lowerCamelCase ( a_ : np.ndarray , a_ : int , a_ : int):
lowerCamelCase :Optional[int] = np.array(a_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''')
lowerCamelCase :Dict = 0
lowerCamelCase :int = 0
lowerCamelCase :List[Any] = 0
lowerCamelCase :Dict = 0
# compute the shape of the output matrix
lowerCamelCase :Optional[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCamelCase :Union[str, Any] = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCamelCase :Union[str, Any] = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase :Dict = 0
lowerCamelCase :int = 0
return updated_arr
def _lowerCamelCase ( a_ : np.ndarray , a_ : int , a_ : int):
lowerCamelCase :Optional[int] = np.array(a_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''')
lowerCamelCase :Any = 0
lowerCamelCase :Any = 0
lowerCamelCase :Any = 0
lowerCamelCase :List[Any] = 0
# compute the shape of the output matrix
lowerCamelCase :List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCamelCase :Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCamelCase :Optional[int] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase :str = 0
lowerCamelCase :List[str] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
A__ = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 49
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
| 49
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 1
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A__ = threading.Lock()
A__ = None
A__ = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
A__ = logging.WARNING
A__ = True
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = os.getenv('''TRANSFORMERS_VERBOSITY''' , a_)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys()) }")
return _default_log_level
def _lowerCamelCase ( ):
return __name__.split('''.''')[0]
def _lowerCamelCase ( ):
return logging.getLogger(_get_library_name())
def _lowerCamelCase ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
lowerCamelCase :Tuple = logging.StreamHandler() # Set sys.stderr as stream.
lowerCamelCase :Union[str, Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
lowerCamelCase :List[str] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
lowerCamelCase :Tuple = False
def _lowerCamelCase ( ):
global _default_handler
with _lock:
if not _default_handler:
return
lowerCamelCase :Optional[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler)
library_root_logger.setLevel(logging.NOTSET)
lowerCamelCase :int = None
def _lowerCamelCase ( ):
return log_levels
def _lowerCamelCase ( a_ : Optional[str] = None):
if name is None:
lowerCamelCase :List[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(a_)
def _lowerCamelCase ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCamelCase ( a_ : int):
_configure_library_root_logger()
_get_library_root_logger().setLevel(a_)
def _lowerCamelCase ( ):
return set_verbosity(a_)
def _lowerCamelCase ( ):
return set_verbosity(a_)
def _lowerCamelCase ( ):
return set_verbosity(a_)
def _lowerCamelCase ( ):
return set_verbosity(a_)
def _lowerCamelCase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
def _lowerCamelCase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
def _lowerCamelCase ( a_ : logging.Handler):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(a_)
def _lowerCamelCase ( a_ : logging.Handler):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(a_)
def _lowerCamelCase ( ):
_configure_library_root_logger()
lowerCamelCase :List[str] = False
def _lowerCamelCase ( ):
_configure_library_root_logger()
lowerCamelCase :str = True
def _lowerCamelCase ( ):
lowerCamelCase :Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
lowerCamelCase :Any = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''')
handler.setFormatter(a_)
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(a_)
def _lowerCamelCase ( self : Optional[int] , *a_ : str , **a_ : List[Any]):
lowerCamelCase :str = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , a_)
if no_advisory_warnings:
return
self.warning(*a_ , **a_)
A__ = warning_advice
@functools.lru_cache(a_)
def _lowerCamelCase ( self : int , *a_ : Union[str, Any] , **a_ : Optional[Any]):
self.warning(*a_ , **a_)
A__ = warning_once
class _lowerCAmelCase :
def __init__( self : Any , *__snake_case : str , **__snake_case : List[str] ): # pylint: disable=unused-argument
lowerCamelCase :Union[str, Any] = args[0] if args else None
def __iter__( self : Optional[int] ):
return iter(self._iterator )
def __getattr__( self : Optional[int] , __snake_case : Dict ):
def empty_fn(*__snake_case : List[Any] , **__snake_case : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Union[str, Any] ):
return self
def __exit__( self : Optional[int] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
return
class _lowerCAmelCase :
def __call__( self : Optional[int] , *__snake_case : Dict , **__snake_case : Optional[Any] ):
if _tqdm_active:
return tqdm_lib.tqdm(*__snake_case , **__snake_case )
else:
return EmptyTqdm(*__snake_case , **__snake_case )
def snake_case ( self : int , *__snake_case : Union[str, Any] , **__snake_case : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__snake_case , **__snake_case )
def snake_case ( self : Tuple ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A__ = _tqdm_cls()
def _lowerCamelCase ( ):
global _tqdm_active
return bool(_tqdm_active)
def _lowerCamelCase ( ):
global _tqdm_active
lowerCamelCase :List[str] = True
hf_hub_utils.enable_progress_bars()
def _lowerCamelCase ( ):
global _tqdm_active
lowerCamelCase :List[Any] = False
hf_hub_utils.disable_progress_bars()
| 49
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Union[str, Any]):
lowerCamelCase :Dict = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias"))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
])
return rename_keys
def _lowerCamelCase ( a_ : Dict , a_ : Tuple):
for i in range(encoder_config.num_hidden_layers):
# queries, keys and values (only weights, no biases)
lowerCamelCase :int = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight")
lowerCamelCase :Optional[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCamelCase :Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCamelCase :Any = in_proj_weight[
-encoder_config.hidden_size :, :
]
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Dict , a_ : Optional[int]):
lowerCamelCase :Optional[int] = dct.pop(a_)
lowerCamelCase :List[Any] = val
def _lowerCamelCase ( a_ : Any):
if "handwritten" in checkpoint_url:
lowerCamelCase :str = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase :List[str] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw).convert('''RGB''')
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : List[str] , a_ : str):
lowerCamelCase :Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=a_)
lowerCamelCase :Union[str, Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCamelCase :str = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCamelCase :List[Any] = 10_24
lowerCamelCase :Optional[Any] = 40_96
lowerCamelCase :Optional[Any] = 24
lowerCamelCase :int = 16
lowerCamelCase :Any = 10_24
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''')
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase :Optional[Any] = False
lowerCamelCase :Tuple = '''relu'''
lowerCamelCase :Optional[int] = 10_24
lowerCamelCase :Dict = True
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[Any] = False
# load HuggingFace model
lowerCamelCase :Any = ViTModel(a_ , add_pooling_layer=a_)
lowerCamelCase :Optional[Any] = TrOCRForCausalLM(a_)
lowerCamelCase :Union[str, Any] = VisionEncoderDecoderModel(encoder=a_ , decoder=a_)
model.eval()
# load state_dict of original model, rename some keys
lowerCamelCase :Union[str, Any] = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''' , check_hash=a_)['''model''']
lowerCamelCase :Any = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCamelCase :int = state_dict.pop(a_)
if key.startswith('''decoder''') and "output_projection" not in key:
lowerCamelCase :Optional[int] = val
else:
lowerCamelCase :Any = val
# load state dict
model.load_state_dict(a_)
# Check outputs on an image
lowerCamelCase :int = ViTImageProcessor(size=encoder_config.image_size)
lowerCamelCase :Any = RobertaTokenizer.from_pretrained('''roberta-large''')
lowerCamelCase :List[Any] = TrOCRProcessor(a_ , a_)
lowerCamelCase :int = processor(images=prepare_img(a_) , return_tensors='''pt''').pixel_values
# verify logits
lowerCamelCase :List[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]])
lowerCamelCase :str = model(pixel_values=a_ , decoder_input_ids=a_)
lowerCamelCase :Optional[Any] = outputs.logits
lowerCamelCase :Optional[Any] = torch.Size([1, 1, 5_02_65])
if "trocr-base-handwritten" in checkpoint_url:
lowerCamelCase :Dict = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311])
elif "trocr-large-handwritten" in checkpoint_url:
lowerCamelCase :Optional[int] = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170])
elif "trocr-base-printed" in checkpoint_url:
lowerCamelCase :str = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210])
elif "trocr-large-printed" in checkpoint_url:
lowerCamelCase :Tuple = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535])
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , a_ , atol=1e-3), "First elements of logits not as expected"
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
A__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 49
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 1
|
def _lowerCamelCase ( a_ : dict):
lowerCamelCase :set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowerCamelCase :set[int] = set()
return any(
node not in visited and depth_first_search(a_ , a_ , a_ , a_)
for node in graph)
def _lowerCamelCase ( a_ : dict , a_ : int , a_ : set , a_ : set):
visited.add(a_)
rec_stk.add(a_)
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a_ , a_ , a_ , a_):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a_)
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A__ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
_UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_UpperCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case ( self : Dict , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Dict ):
lowerCamelCase :Any = ZeroShotClassificationPipeline(
model=__snake_case , tokenizer=__snake_case , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case ( self : int , __snake_case : str , __snake_case : List[Any] ):
lowerCamelCase :List[str] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
# No kwarg
lowerCamelCase :Any = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
lowerCamelCase :Any = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
lowerCamelCase :int = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase :Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase :Optional[Any] = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
# https://github.com/huggingface/transformers/issues/13846
lowerCamelCase :List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]}
for i in range(1 )
] , )
lowerCamelCase :str = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]}
for i in range(2 )
] , )
with self.assertRaises(__snake_case ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(__snake_case ):
classifier(__snake_case , candidate_labels='''politics''' )
with self.assertRaises(__snake_case ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(__snake_case ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=__snake_case )
with self.assertRaises(__snake_case ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(__snake_case ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=__snake_case , )
self.run_entailment_id(__snake_case )
def snake_case ( self : Dict , __snake_case : Pipeline ):
lowerCamelCase :int = zero_shot_classifier.model.config
lowerCamelCase :Optional[int] = config.labelaid
lowerCamelCase :Optional[Any] = zero_shot_classifier.entailment_id
lowerCamelCase :List[Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
lowerCamelCase :Any = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase :Tuple = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase :List[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
lowerCamelCase :Any = original_labelaid
self.assertEqual(__snake_case , zero_shot_classifier.entailment_id )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :str = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
lowerCamelCase :Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Any = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
lowerCamelCase :Union[str, Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
lowerCamelCase :Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
lowerCamelCase :List[str] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Optional[Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
lowerCamelCase :Optional[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
lowerCamelCase :int = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 49
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = XLMRobertaTokenizer
_UpperCAmelCase = XLMRobertaTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def snake_case ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase :List[Any] = XLMRobertaTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Dict ):
lowerCamelCase :List[str] = '''<pad>'''
lowerCamelCase :Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__snake_case ) , 1002 )
def snake_case ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def snake_case ( self : Tuple ):
lowerCamelCase :str = XLMRobertaTokenizer(__snake_case , keep_accents=__snake_case )
lowerCamelCase :int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase :str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCamelCase :Tuple = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase :Any = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case ( self : str ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase :int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :List[str] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = tempfile.mkdtemp()
lowerCamelCase :List[Any] = tokenizer_r.save_pretrained(__snake_case )
lowerCamelCase :List[str] = tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCamelCase :Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
lowerCamelCase :Dict = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :Optional[Any] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
lowerCamelCase :List[str] = tempfile.mkdtemp()
lowerCamelCase :Optional[Any] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
lowerCamelCase :Any = tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
lowerCamelCase :Any = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :Optional[int] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
lowerCamelCase :Dict = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
lowerCamelCase :Optional[int] = tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase :Tuple = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :str = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@cached_property
def snake_case ( self : Optional[Any] ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__snake_case , f.name )
lowerCamelCase :Optional[int] = XLMRobertaTokenizer(f.name , keep_accents=__snake_case )
lowerCamelCase :Any = pickle.dumps(__snake_case )
pickle.loads(__snake_case )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowerCamelCase :Optional[Any] = self.get_tokenizer()
lowerCamelCase :Tuple = self.get_rust_tokenizer()
lowerCamelCase :List[str] = '''I was born in 92000, and this is falsé.'''
lowerCamelCase :Any = tokenizer.tokenize(__snake_case )
lowerCamelCase :Any = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase :List[str] = tokenizer.encode(__snake_case )
lowerCamelCase :Dict = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = '''Hello World!'''
lowerCamelCase :Dict = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCamelCase :Optional[int] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def snake_case ( self : str ):
# fmt: off
lowerCamelCase :Any = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 49
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 1
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
A__ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _lowerCamelCase ( a_ : Optional[Any]):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _lowerCamelCase ( a_ : Optional[int] , a_ : List[Any]):
if args.student_type == "roberta":
lowerCamelCase :Dict = False
elif args.student_type == "gpt2":
lowerCamelCase :str = False
def _lowerCamelCase ( a_ : Any , a_ : int):
if args.student_type == "roberta":
lowerCamelCase :Union[str, Any] = False
def _lowerCamelCase ( ):
lowerCamelCase :List[Any] = argparse.ArgumentParser(description='''Training''')
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''')
parser.add_argument(
'''--dump_path''' , type=a_ , required=a_ , help='''The output directory (log, checkpoints, parameters, etc.)''')
parser.add_argument(
'''--data_file''' , type=a_ , required=a_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=a_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=a_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=a_ , required=a_ , help='''Path to the student configuration.''')
parser.add_argument(
'''--student_pretrained_weights''' , default=a_ , type=a_ , help='''Load student initialization checkpoint.''')
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=a_ , help='''Teacher type (BERT, RoBERTa).''')
parser.add_argument('''--teacher_name''' , type=a_ , required=a_ , help='''The teacher model.''')
parser.add_argument('''--temperature''' , default=2.0 , type=a_ , help='''Temperature for the softmax temperature.''')
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=a_ , help='''Linear weight for the distillation loss. Must be >=0.''')
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=a_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=a_ , help='''Linear weight for the CLM loss. Must be >=0.''')
parser.add_argument('''--alpha_mse''' , default=0.0 , type=a_ , help='''Linear weight of the MSE loss. Must be >=0.''')
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=a_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''')
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''')
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=a_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=a_ , help='''Proportion of tokens to mask out.''')
parser.add_argument('''--word_keep''' , default=0.1 , type=a_ , help='''Proportion of tokens to keep.''')
parser.add_argument('''--word_rand''' , default=0.1 , type=a_ , help='''Proportion of tokens to randomly replace.''')
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=a_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=a_ , help='''The token counts in the data_file for MLM.''')
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=a_ , default=3 , help='''Number of pass on the whole dataset.''')
parser.add_argument('''--batch_size''' , type=a_ , default=5 , help='''Batch size (for each process).''')
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=a_ , help='''Linear warmup proportion.''')
parser.add_argument('''--weight_decay''' , default=0.0 , type=a_ , help='''Weight decay if we apply some.''')
parser.add_argument('''--learning_rate''' , default=5e-4 , type=a_ , help='''The initial learning rate for Adam.''')
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=a_ , help='''Max gradient norm.''')
parser.add_argument('''--initializer_range''' , default=0.02 , type=a_ , help='''Random initialization range.''')
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=a_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=a_ , default=1 , help='''Number of GPUs in the node.''')
parser.add_argument('''--local_rank''' , type=a_ , default=-1 , help='''Distributed training - Local rank''')
parser.add_argument('''--seed''' , type=a_ , default=56 , help='''Random seed''')
parser.add_argument('''--log_interval''' , type=a_ , default=5_00 , help='''Tensorboard logging interval.''')
parser.add_argument('''--checkpoint_interval''' , type=a_ , default=40_00 , help='''Checkpoint interval.''')
lowerCamelCase :List[Any] = parser.parse_args()
sanity_checks(a_)
# ARGS #
init_gpu_params(a_)
set_seed(a_)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
''' itUse `--force` if you want to overwrite it''')
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F"Experiment will be dumped and logged in {args.dump_path}")
# SAVE PARAMS #
logger.info(F"Param: {args}")
with open(os.path.join(args.dump_path , '''parameters.json''') , '''w''') as f:
json.dump(vars(a_) , a_ , indent=4)
git_log(args.dump_path)
lowerCamelCase , lowerCamelCase , lowerCamelCase :int = MODEL_CLASSES[args.student_type]
lowerCamelCase , lowerCamelCase , lowerCamelCase :int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCamelCase :Optional[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name)
lowerCamelCase :List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCamelCase :Optional[Any] = tokenizer.all_special_tokens.index(a_)
lowerCamelCase :List[Any] = tokenizer.all_special_ids[idx]
logger.info(F"Special tokens {special_tok_ids}")
lowerCamelCase :Dict = special_tok_ids
lowerCamelCase :Optional[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file , '''rb''') as fp:
lowerCamelCase :str = pickle.load(a_)
if args.mlm:
logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)")
with open(args.token_counts , '''rb''') as fp:
lowerCamelCase :Union[str, Any] = pickle.load(a_)
lowerCamelCase :int = np.maximum(a_ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCamelCase :Tuple = 0.0 # do not predict special tokens
lowerCamelCase :Optional[Any] = torch.from_numpy(a_)
else:
lowerCamelCase :Optional[int] = None
lowerCamelCase :Optional[int] = LmSeqsDataset(params=a_ , data=a_)
logger.info('''Data loader created.''')
# STUDENT #
logger.info(F"Loading student config from {args.student_config}")
lowerCamelCase :Dict = student_config_class.from_pretrained(args.student_config)
lowerCamelCase :Tuple = True
if args.student_pretrained_weights is not None:
logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}")
lowerCamelCase :Dict = student_model_class.from_pretrained(args.student_pretrained_weights , config=a_)
else:
lowerCamelCase :Tuple = student_model_class(a_)
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}")
logger.info('''Student loaded.''')
# TEACHER #
lowerCamelCase :str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=a_)
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}")
logger.info(F"Teacher loaded from {args.teacher_name}.")
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(a_ , a_)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(a_ , a_)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCamelCase :Optional[Any] = Distiller(
params=a_ , dataset=a_ , token_probs=a_ , student=a_ , teacher=a_)
distiller.train()
logger.info('''Let\'s go get some drinks.''')
if __name__ == "__main__":
main()
| 49
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
| 49
| 1
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
@property
def snake_case ( self : Any ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self : str ):
lowerCamelCase :int = ort.SessionOptions()
lowerCamelCase :Dict = False
return options
def snake_case ( self : Dict ):
lowerCamelCase :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCamelCase :List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCamelCase :Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Optional[int] = '''A red cat sitting on a park bench'''
lowerCamelCase :Optional[int] = np.random.RandomState(0 )
lowerCamelCase :int = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :str = output.images
lowerCamelCase :Tuple = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase :Tuple = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self : List[Any] ):
lowerCamelCase :Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCamelCase :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCamelCase :int = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCamelCase :Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Any = '''A red cat sitting on a park bench'''
lowerCamelCase :Optional[int] = np.random.RandomState(0 )
lowerCamelCase :str = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type='''np''' , )
lowerCamelCase :str = output.images
lowerCamelCase :Any = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase :Optional[Any] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 49
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 1
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
A__ = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
A__ = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
A__ = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def _lowerCamelCase ( a_ : Tuple , a_ : List[Any]):
return float((preds == labels).mean())
def _lowerCamelCase ( a_ : Tuple , a_ : Optional[Any]):
lowerCamelCase :int = simple_accuracy(a_ , a_)
lowerCamelCase :Tuple = float(fa_score(y_true=a_ , y_pred=a_))
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCamelCase ( a_ : str , a_ : List[str]):
lowerCamelCase :Optional[Any] = np.array(a_)
lowerCamelCase :Optional[int] = np.array(a_)
lowerCamelCase :Any = en_sentvecs.shape[0]
# mean centering
lowerCamelCase :List[str] = en_sentvecs - np.mean(a_ , axis=0)
lowerCamelCase :Tuple = in_sentvecs - np.mean(a_ , axis=0)
lowerCamelCase :int = cdist(a_ , a_ , '''cosine''')
lowerCamelCase :List[Any] = np.array(range(a_))
lowerCamelCase :Any = sim.argsort(axis=1)[:, :10]
lowerCamelCase :str = np.any(preds == actual[:, None] , axis=1)
return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def snake_case ( self : str ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def snake_case ( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : str ):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__snake_case , __snake_case )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__snake_case , __snake_case )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__snake_case , __snake_case )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 49
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 1
|
def _lowerCamelCase ( a_ : int , a_ : int):
while second != 0:
lowerCamelCase :str = first & second
first ^= second
lowerCamelCase :Union[str, Any] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = int(input("""Enter the first number: """).strip())
A__ = int(input("""Enter the second number: """).strip())
print(F'{add(first, second) = }')
| 49
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 1
|
from __future__ import annotations
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Union[str, Any] = [True] * limit
lowerCamelCase :Tuple = False
lowerCamelCase :List[str] = False
lowerCamelCase :List[Any] = True
for i in range(3 , int(limit**0.5 + 1) , 2):
lowerCamelCase :List[Any] = i * 2
while index < limit:
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :List[Any] = index + i
lowerCamelCase :List[str] = [2]
for i in range(3 , a_ , 2):
if is_prime[i]:
primes.append(a_)
return primes
def _lowerCamelCase ( a_ : int = 1_00_00_00):
lowerCamelCase :Tuple = prime_sieve(a_)
lowerCamelCase :str = 0
lowerCamelCase :Dict = 0
for i in range(len(a_)):
for j in range(i + length , len(a_)):
lowerCamelCase :Dict = sum(primes[i:j])
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase :str = j - i
lowerCamelCase :Optional[int] = sol
return largest
if __name__ == "__main__":
print(F'{solution() = }')
| 49
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 1
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A__ = """src/transformers"""
A__ = """docs/source/en/tasks"""
def _lowerCamelCase ( a_ : int , a_ : Optional[int] , a_ : int):
with open(a_ , '''r''' , encoding='''utf-8''' , newline='''\n''') as f:
lowerCamelCase :Dict = f.readlines()
# Find the start prompt.
lowerCamelCase :List[str] = 0
while not lines[start_index].startswith(a_):
start_index += 1
start_index += 1
lowerCamelCase :Optional[Any] = start_index
while not lines[end_index].startswith(a_):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
A__ = direct_transformers_import(TRANSFORMERS_PATH)
A__ = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A__ = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def _lowerCamelCase ( a_ : List[str]):
lowerCamelCase :Tuple = TASK_GUIDE_TO_MODELS[task_guide]
lowerCamelCase :Optional[int] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(a_ , set())
lowerCamelCase :List[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any]=False):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Tuple = _find_text_in_file(
filename=os.path.join(a_ , a_) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
lowerCamelCase :str = get_model_list_for_task(a_)
if current_list != new_list:
if overwrite:
with open(os.path.join(a_ , a_) , '''w''' , encoding='''utf-8''' , newline='''\n''') as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
''' to fix this.''')
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 49
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 1
|
from collections import deque
class _lowerCAmelCase :
def __init__( self : Union[str, Any] , __snake_case : str , __snake_case : int , __snake_case : int ):
lowerCamelCase :List[Any] = process_name # process name
lowerCamelCase :int = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowerCamelCase :int = arrival_time
lowerCamelCase :int = burst_time # remaining burst time
lowerCamelCase :Tuple = 0 # total time of the process wait in ready queue
lowerCamelCase :Any = 0 # time from arrival time to completion time
class _lowerCAmelCase :
def __init__( self : List[str] , __snake_case : int , __snake_case : list[int] , __snake_case : deque[Process] , __snake_case : int , ):
# total number of mlfq's queues
lowerCamelCase :List[Any] = number_of_queues
# time slice of queues that round robin algorithm applied
lowerCamelCase :int = time_slices
# unfinished process is in this ready_queue
lowerCamelCase :str = queue
# current time
lowerCamelCase :Optional[Any] = current_time
# finished process is in this sequence queue
lowerCamelCase :deque[Process] = deque()
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case ( self : List[str] , __snake_case : list[Process] ):
lowerCamelCase :str = []
for i in range(len(__snake_case ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case ( self : Union[str, Any] , __snake_case : list[Process] ):
lowerCamelCase :Optional[Any] = []
for i in range(len(__snake_case ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case ( self : Any , __snake_case : list[Process] ):
lowerCamelCase :List[str] = []
for i in range(len(__snake_case ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case ( self : int , __snake_case : deque[Process] ):
return [q.burst_time for q in queue]
def snake_case ( self : Optional[Any] , __snake_case : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case ( self : List[str] , __snake_case : deque[Process] ):
lowerCamelCase :deque[Process] = deque() # sequence deque of finished process
while len(__snake_case ) != 0:
lowerCamelCase :Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__snake_case )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCamelCase :Dict = 0
# set the process's turnaround time because it is finished
lowerCamelCase :Any = self.current_time - cp.arrival_time
# set the completion time
lowerCamelCase :Dict = self.current_time
# add the process to queue that has finished queue
finished.append(__snake_case )
self.finish_queue.extend(__snake_case ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case ( self : Tuple , __snake_case : deque[Process] , __snake_case : int ):
lowerCamelCase :deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__snake_case ) ):
lowerCamelCase :List[str] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__snake_case )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCamelCase :List[Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__snake_case )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCamelCase :Optional[Any] = 0
# set the finish time
lowerCamelCase :Optional[int] = self.current_time
# update the process' turnaround time because it is finished
lowerCamelCase :Tuple = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__snake_case )
self.finish_queue.extend(__snake_case ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case ( self : str ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A__ = Process("""P1""", 0, 53)
A__ = Process("""P2""", 0, 17)
A__ = Process("""P3""", 0, 68)
A__ = Process("""P4""", 0, 24)
A__ = 3
A__ = [17, 25]
A__ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
A__ = Process("""P1""", 0, 53)
A__ = Process("""P2""", 0, 17)
A__ = Process("""P3""", 0, 68)
A__ = Process("""P4""", 0, 24)
A__ = 3
A__ = [17, 25]
A__ = deque([Pa, Pa, Pa, Pa])
A__ = MLFQ(number_of_queues, time_slices, queue, 0)
A__ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 49
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : List[str] ):
lowerCamelCase :str = '''laion/clap-htsat-unfused'''
lowerCamelCase :Union[str, Any] = tempfile.mkdtemp()
def snake_case ( self : List[str] , **__snake_case : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **A__ )
def snake_case ( self : Optional[int] , **__snake_case : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A__ )
def snake_case ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :Any = self.get_feature_extractor()
lowerCamelCase :str = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase :List[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A__ )
def snake_case ( self : List[str] ):
lowerCamelCase :Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase :Optional[int] = self.get_feature_extractor(do_normalize=A__ , padding_value=1.0 )
lowerCamelCase :Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A__ )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = self.get_feature_extractor()
lowerCamelCase :Any = self.get_tokenizer()
lowerCamelCase :Optional[int] = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
lowerCamelCase :List[str] = floats_list((3, 1000) )
lowerCamelCase :Dict = feature_extractor(A__ , return_tensors='''np''' )
lowerCamelCase :str = processor(audios=A__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :str = self.get_feature_extractor()
lowerCamelCase :Tuple = self.get_tokenizer()
lowerCamelCase :Optional[Any] = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
lowerCamelCase :Tuple = '''This is a test string'''
lowerCamelCase :Optional[int] = processor(text=A__ )
lowerCamelCase :str = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
lowerCamelCase :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase :str = processor.batch_decode(A__ )
lowerCamelCase :Optional[int] = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
def snake_case ( self : List[str] ):
lowerCamelCase :Any = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :List[Any] = ClapProcessor(tokenizer=A__ , feature_extractor=A__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class _lowerCAmelCase ( _snake_case ):
_UpperCAmelCase = 'xlnet'
_UpperCAmelCase = ['mems']
_UpperCAmelCase = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , __snake_case : List[Any]=32000 , __snake_case : Optional[Any]=1024 , __snake_case : str=24 , __snake_case : Optional[Any]=16 , __snake_case : List[Any]=4096 , __snake_case : List[Any]="gelu" , __snake_case : Any=True , __snake_case : List[str]="bi" , __snake_case : Dict=0.0_2 , __snake_case : Tuple=1e-1_2 , __snake_case : str=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : List[Any]=None , __snake_case : Dict=True , __snake_case : Dict=False , __snake_case : str=False , __snake_case : Tuple=-1 , __snake_case : str=False , __snake_case : Any="last" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]="tanh" , __snake_case : int=0.1 , __snake_case : Optional[int]=5 , __snake_case : List[str]=5 , __snake_case : Dict=5 , __snake_case : str=1 , __snake_case : Dict=2 , **__snake_case : Tuple , ):
lowerCamelCase :Dict = vocab_size
lowerCamelCase :int = d_model
lowerCamelCase :Union[str, Any] = n_layer
lowerCamelCase :List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
lowerCamelCase :List[str] = d_model // n_head
lowerCamelCase :List[str] = ff_activation
lowerCamelCase :Union[str, Any] = d_inner
lowerCamelCase :Optional[Any] = untie_r
lowerCamelCase :List[str] = attn_type
lowerCamelCase :Union[str, Any] = initializer_range
lowerCamelCase :Tuple = layer_norm_eps
lowerCamelCase :Dict = dropout
lowerCamelCase :Any = mem_len
lowerCamelCase :Optional[int] = reuse_len
lowerCamelCase :Optional[Any] = bi_data
lowerCamelCase :Union[str, Any] = clamp_len
lowerCamelCase :Any = same_length
lowerCamelCase :int = summary_type
lowerCamelCase :List[str] = summary_use_proj
lowerCamelCase :Optional[int] = summary_activation
lowerCamelCase :int = summary_last_dropout
lowerCamelCase :int = start_n_top
lowerCamelCase :List[str] = end_n_top
lowerCamelCase :Tuple = bos_token_id
lowerCamelCase :List[Any] = pad_token_id
lowerCamelCase :List[Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , lowerCAmelCase__ , )
lowerCamelCase :List[str] = kwargs['''use_cache''']
lowerCamelCase :Tuple = use_mems_eval
lowerCamelCase :Union[str, Any] = use_mems_train
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def snake_case ( self : Dict ):
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def snake_case ( self : List[str] , __snake_case : int ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def _lowerCamelCase ( a_ : List[str]):
lowerCamelCase :int = test_file.split(os.path.sep)
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F"{test_file} instead.")
lowerCamelCase :Optional[Any] = components[-1]
if not test_fn.endswith('''py'''):
raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead.")
if not test_fn.startswith('''test_modeling_'''):
raise ValueError(
F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
lowerCamelCase :Optional[Any] = components[:-1] + [test_fn.replace('''.py''' , '''''')]
lowerCamelCase :Optional[Any] = '''.'''.join(_SCREAMING_SNAKE_CASE)
return test_module_path
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Union[str, Any] = get_module_path(_SCREAMING_SNAKE_CASE)
lowerCamelCase :Union[str, Any] = importlib.import_module(_SCREAMING_SNAKE_CASE)
return test_module
def _lowerCamelCase ( a_ : Optional[int]):
lowerCamelCase :int = []
lowerCamelCase :List[str] = get_test_module(_SCREAMING_SNAKE_CASE)
for attr in dir(_SCREAMING_SNAKE_CASE):
if attr.endswith('''ModelTester'''):
tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE))
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda a_: x.__name__)
def _lowerCamelCase ( a_ : int):
lowerCamelCase :List[str] = []
lowerCamelCase :Optional[Any] = get_test_module(_SCREAMING_SNAKE_CASE)
for attr in dir(_SCREAMING_SNAKE_CASE):
lowerCamelCase :List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase :int = getattr(_SCREAMING_SNAKE_CASE , '''all_model_classes''' , [])
if len(_SCREAMING_SNAKE_CASE) > 0:
test_classes.append(_SCREAMING_SNAKE_CASE)
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda a_: x.__name__)
def _lowerCamelCase ( a_ : Optional[int]):
lowerCamelCase :str = get_test_classes(_SCREAMING_SNAKE_CASE)
lowerCamelCase :Dict = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes)
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda a_: x.__name__)
def _lowerCamelCase ( a_ : Union[str, Any]):
lowerCamelCase :Union[str, Any] = test_class()
if hasattr(_SCREAMING_SNAKE_CASE , '''setUp'''):
test.setUp()
lowerCamelCase :int = None
if hasattr(_SCREAMING_SNAKE_CASE , '''model_tester'''):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase :Union[str, Any] = test.model_tester.__class__
return model_tester
def _lowerCamelCase ( a_ : str , a_ : Optional[int]):
lowerCamelCase :Optional[Any] = get_test_classes(_SCREAMING_SNAKE_CASE)
lowerCamelCase :str = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_SCREAMING_SNAKE_CASE)
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda a_: x.__name__)
def _lowerCamelCase ( a_ : int , a_ : int):
lowerCamelCase :Union[str, Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
lowerCamelCase :Optional[int] = []
for test_class in test_classes:
lowerCamelCase :int = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE)
if tester_class is not None:
tester_classes.append(_SCREAMING_SNAKE_CASE)
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda a_: x.__name__)
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :Any = get_test_classes(_SCREAMING_SNAKE_CASE)
lowerCamelCase :Optional[Any] = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE) for test_class in test_classes}
return test_tester_mapping
def _lowerCamelCase ( a_ : Union[str, Any]):
lowerCamelCase :Optional[int] = get_model_classes(_SCREAMING_SNAKE_CASE)
lowerCamelCase :Optional[int] = {
model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) for model_class in model_classes
}
return model_test_mapping
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Optional[int] = get_model_classes(_SCREAMING_SNAKE_CASE)
lowerCamelCase :Dict = {
model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) for model_class in model_classes
}
return model_to_tester_mapping
def _lowerCamelCase ( a_ : List[Any]):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
return o
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
return o.__name__
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple)):
return [to_json(_SCREAMING_SNAKE_CASE) for x in o]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
return {to_json(_SCREAMING_SNAKE_CASE): to_json(_SCREAMING_SNAKE_CASE) for k, v in o.items()}
else:
return o
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 0
|
from collections.abc import Generator
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Any = 0, 1
while True:
lowerCamelCase , lowerCamelCase :Dict = b, a + b
yield b
def _lowerCamelCase ( a_ : Dict = 10_00):
lowerCamelCase :Optional[int] = 1
lowerCamelCase :Dict = fibonacci_generator()
while len(str(next(a_))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 703
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
| 49
| 0
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = '''▁'''
A__ = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
A__ = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
A__ = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
A__ = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
A__ = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['input_ids']
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = RESOURCE_FILES_NAMES
def __init__( self : Any , __snake_case : Tuple , __snake_case : Optional[int]=None , __snake_case : Any=False , __snake_case : List[str]="utf8" , __snake_case : Optional[int]="[UNK]" , __snake_case : Dict="[SEP]" , __snake_case : List[str]="[PAD]" , __snake_case : Dict="[CLS]" , __snake_case : Optional[int]="[MASK]" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Optional[int] , ):
lowerCamelCase :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , vocab_file=UpperCamelCase_ , encoding=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowerCamelCase :Dict = do_lower_case
lowerCamelCase :Optional[Any] = sentencepiece_model_ckpt
lowerCamelCase :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCamelCase :List[Any] = self.load_vocab(filepath=UpperCamelCase_ )
else:
lowerCamelCase :str = {self.sp_model.id_to_piece(UpperCamelCase_ ): id for id in range(self.sp_model.get_piece_size() )}
lowerCamelCase :List[str] = {v: k for k, v in self.vocab.items()}
def snake_case ( self : List[str] , __snake_case : Dict ):
if text is None:
return None
lowerCamelCase :Optional[Any] = self.tokenize(UpperCamelCase_ )
lowerCamelCase :int = '', []
for i, ch in enumerate(UpperCamelCase_ ):
if ch in self.SP_CHAR_MAPPING:
lowerCamelCase :str = self.SP_CHAR_MAPPING.get(UpperCamelCase_ )
else:
lowerCamelCase :Optional[int] = unicodedata.normalize('''NFKC''' , UpperCamelCase_ )
if self.is_whitespace(UpperCamelCase_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase_ ) )
lowerCamelCase :Any = normalized_text, [], 0
if self.do_lower_case:
lowerCamelCase :Union[str, Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCamelCase :Tuple = token[1:]
lowerCamelCase :Optional[Any] = text[offset:].index(UpperCamelCase_ ) + offset
lowerCamelCase :Any = start + len(UpperCamelCase_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCamelCase :Tuple = end
return token_mapping
@property
def snake_case ( self : Dict ):
return len(self.vocab )
def snake_case ( self : Tuple ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : List[str] ):
lowerCamelCase :Optional[int] = self.__dict__.copy()
lowerCamelCase :List[str] = None
return state
def __setstate__( self : List[Any] , __snake_case : Dict ):
lowerCamelCase :Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase :Tuple = {}
lowerCamelCase :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case ( self : Dict , __snake_case : Dict ):
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase_ , UpperCamelCase_ ) for c in text) )
def snake_case ( self : Union[str, Any] , __snake_case : int , __snake_case : Dict=False , __snake_case : Optional[int]=64 , __snake_case : Tuple=0.1 ):
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
lowerCamelCase :List[Any] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
lowerCamelCase :Optional[int] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
lowerCamelCase :List[Any] = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
lowerCamelCase :Any = self.sp_model.EncodeAsPieces(UpperCamelCase_ )
else:
lowerCamelCase :Tuple = self.sp_model.SampleEncodeAsPieces(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase :Any = []
for pi, piece in enumerate(UpperCamelCase_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase_ ) and pi != 0:
new_pieces.append(UpperCamelCase_ )
continue
else:
continue
lowerCamelCase :str = 0
for i, chunk in enumerate(UpperCamelCase_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase_ ) or self.is_punct(UpperCamelCase_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase_ )
lowerCamelCase :Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase :Any = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase :Any = i
if len(UpperCamelCase_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case ( self : Tuple , __snake_case : Dict ):
lowerCamelCase :str = ''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def snake_case ( self : str , __snake_case : str ):
lowerCamelCase :str = self.convert_ids_to_tokens(UpperCamelCase_ )
lowerCamelCase :int = ''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip()
return out_string
def snake_case ( self : Optional[Any] , __snake_case : Any ):
return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) )
def snake_case ( self : Optional[int] , __snake_case : List[str] ):
return self.reverse_vocab.get(UpperCamelCase_ , self.unk_token )
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : int=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase :List[Any] = [self.cls_token_id]
lowerCamelCase :Tuple = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case ( self : Dict , __snake_case : Optional[Any] , __snake_case : str=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[int]=None , __snake_case : Any=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
def snake_case ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase_ ) + 1) + [1] * (len(UpperCamelCase_ ) + 3)
def snake_case ( self : str , __snake_case : Union[str, Any] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case ( self : Optional[int] , __snake_case : List[Any] ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case ( self : Union[str, Any] , __snake_case : List[str] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case ( self : Any , __snake_case : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase_ ) == 1:
lowerCamelCase :Any = unicodedata.category(UpperCamelCase_ )
if cat == "Zs":
return True
return False
def snake_case ( self : Union[str, Any] , __snake_case : Any ):
lowerCamelCase :List[str] = {}
with io.open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase_ ):
lowerCamelCase :List[str] = line.rstrip('''\n''' )
lowerCamelCase :Optional[Any] = int(UpperCamelCase_ )
return token_to_idx
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
lowerCamelCase :Optional[int] = 0
if os.path.isdir(UpperCamelCase_ ):
lowerCamelCase :Any = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowerCamelCase :Union[str, Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __snake_case : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
''' Please check that the vocabulary is not corrupted!''' )
lowerCamelCase :Optional[Any] = token_index
writer.write(token + '''\n''' )
index += 1
lowerCamelCase :int = os.path.join(UpperCamelCase_ , '''sentencepiece.bpe.model''' )
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowerCamelCase :int = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (vocab_file,)
| 704
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
A__ = logging.get_logger("""transformers.models.encodec""")
A__ = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
A__ = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
A__ = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
A__ = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
A__ = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
A__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
A__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
A__ = []
A__ = []
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Optional[int] , a_ : int):
for attribute in key.split('''.'''):
lowerCamelCase :Optional[Any] = getattr(_A , _A)
if weight_type is not None:
lowerCamelCase :List[str] = getattr(_A , _A).shape
else:
lowerCamelCase :List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}")
if weight_type == "weight":
lowerCamelCase :Dict = value
elif weight_type == "weight_g":
lowerCamelCase :str = value
elif weight_type == "weight_v":
lowerCamelCase :Optional[int] = value
elif weight_type == "bias":
lowerCamelCase :Tuple = value
elif weight_type == "running_mean":
lowerCamelCase :List[Any] = value
elif weight_type == "running_var":
lowerCamelCase :str = value
elif weight_type == "num_batches_tracked":
lowerCamelCase :List[Any] = value
elif weight_type == "weight_ih_l0":
lowerCamelCase :Optional[Any] = value
elif weight_type == "weight_hh_l0":
lowerCamelCase :Tuple = value
elif weight_type == "bias_ih_l0":
lowerCamelCase :str = value
elif weight_type == "bias_hh_l0":
lowerCamelCase :Optional[int] = value
elif weight_type == "weight_ih_l1":
lowerCamelCase :Dict = value
elif weight_type == "weight_hh_l1":
lowerCamelCase :str = value
elif weight_type == "bias_ih_l1":
lowerCamelCase :Optional[Any] = value
elif weight_type == "bias_hh_l1":
lowerCamelCase :int = value
else:
lowerCamelCase :Optional[Any] = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def _lowerCamelCase ( a_ : Tuple , a_ : Any):
for key in ignore_keys:
if key.endswith('''.*'''):
if name.startswith(key[:-1]):
return True
elif ".*." in key:
lowerCamelCase :Any = key.split('''.*.''')
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowerCamelCase ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[Any]):
lowerCamelCase :Dict = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCamelCase :str = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCamelCase :Any = MAPPING_48K
else:
raise ValueError(F"Unsupported model: {model_name}")
for name, value in orig_dict.items():
if should_ignore(_A , _A):
logger.info(F"{name} was ignored")
continue
lowerCamelCase :int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCamelCase :Any = key.split('''.*.''')
if prefix in name and suffix in name:
lowerCamelCase :Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''') and name.endswith('''embed_avg'''):
continue
lowerCamelCase :Optional[Any] = True
if "*" in mapped_key:
lowerCamelCase :Optional[Any] = name.split(_A)[0].split('''.''')[-2]
lowerCamelCase :str = mapped_key.replace('''*''' , _A)
if "weight_g" in name:
lowerCamelCase :Any = "weight_g"
elif "weight_v" in name:
lowerCamelCase :List[Any] = "weight_v"
elif "weight_ih_l0" in name:
lowerCamelCase :Union[str, Any] = "weight_ih_l0"
elif "weight_hh_l0" in name:
lowerCamelCase :Dict = "weight_hh_l0"
elif "bias_ih_l0" in name:
lowerCamelCase :Optional[Any] = "bias_ih_l0"
elif "bias_hh_l0" in name:
lowerCamelCase :Optional[int] = "bias_hh_l0"
elif "weight_ih_l1" in name:
lowerCamelCase :Dict = "weight_ih_l1"
elif "weight_hh_l1" in name:
lowerCamelCase :Optional[Any] = "weight_hh_l1"
elif "bias_ih_l1" in name:
lowerCamelCase :List[str] = "bias_ih_l1"
elif "bias_hh_l1" in name:
lowerCamelCase :List[Any] = "bias_hh_l1"
elif "bias" in name:
lowerCamelCase :str = "bias"
elif "weight" in name:
lowerCamelCase :List[Any] = "weight"
elif "running_mean" in name:
lowerCamelCase :Any = "running_mean"
elif "running_var" in name:
lowerCamelCase :List[Any] = "running_var"
elif "num_batches_tracked" in name:
lowerCamelCase :str = "num_batches_tracked"
else:
lowerCamelCase :Optional[int] = None
set_recursively(_A , _A , _A , _A , _A)
continue
if not is_used:
unused_weights.append(_A)
logger.warning(F"Unused weights: {unused_weights}")
@torch.no_grad()
def _lowerCamelCase ( a_ : Tuple , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str]=None , a_ : Optional[int]=None , ):
if config_path is not None:
lowerCamelCase :str = EncodecConfig.from_pretrained(_A)
else:
lowerCamelCase :Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCamelCase :Union[str, Any] = [8, 5, 4, 4]
lowerCamelCase :Tuple = [2.2]
lowerCamelCase :int = 64
lowerCamelCase :List[Any] = 3_20_00
lowerCamelCase :Optional[int] = 20_48
lowerCamelCase :Optional[Any] = False
lowerCamelCase :int = False
lowerCamelCase :int = False
elif model_name == "encodec_48khz":
lowerCamelCase :Any = [8, 5, 4, 2]
lowerCamelCase :str = [3.0, 6.0, 12.0, 24.0]
lowerCamelCase :Dict = 4_80_00
lowerCamelCase :Optional[int] = 2
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :str = "time_group_norm"
lowerCamelCase :int = True
lowerCamelCase :Optional[Any] = 1.0
lowerCamelCase :Union[str, Any] = 0.01
else:
raise ValueError(F"Unknown model name: {model_name}")
lowerCamelCase :Dict = EncodecModel(_A)
lowerCamelCase :Dict = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_A)
lowerCamelCase :Dict = torch.load(_A)
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCamelCase :List[str] = original_checkpoint["best_state"]
recursively_load_weights(_A , _A , _A)
model.save_pretrained(_A)
if repo_id:
print('''Pushing to the hub...''')
feature_extractor.push_to_hub(_A)
model.push_to_hub(_A)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
A__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 705
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __lowerCamelCase ):
_UpperCAmelCase = ['image_processor', 'tokenizer']
_UpperCAmelCase = 'CLIPImageProcessor'
_UpperCAmelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[Any] , __snake_case : Optional[int]=None , __snake_case : str=None , **__snake_case : int ):
lowerCamelCase :List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase_ , )
lowerCamelCase :Optional[Any] = kwargs.pop('''feature_extractor''' )
lowerCamelCase :Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : List[str] , __snake_case : Tuple=None , __snake_case : List[str]=None , __snake_case : Tuple=None , **__snake_case : Any ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase :Union[str, Any] = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if images is not None:
lowerCamelCase :Tuple = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and images is not None:
lowerCamelCase :Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def snake_case ( self : str , *__snake_case : str , **__snake_case : Optional[int] ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def snake_case ( self : str , *__snake_case : List[Any] , **__snake_case : List[str] ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def snake_case ( self : Optional[Any] ):
lowerCamelCase :str = self.tokenizer.model_input_names
lowerCamelCase :List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case ( self : int ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def snake_case ( self : str ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
return self.image_processor
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
def __init__( self : Optional[int] , __snake_case : Dict , __snake_case : List[str]=13 , __snake_case : Optional[Any]=7 , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=True , __snake_case : Optional[Any]=99 , __snake_case : List[Any]=16 , __snake_case : str=36 , __snake_case : Optional[int]=6 , __snake_case : List[Any]=6 , __snake_case : Optional[int]=6 , __snake_case : List[str]=37 , __snake_case : Tuple="gelu" , __snake_case : Dict=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Any=16 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0_2 , __snake_case : Tuple=3 , __snake_case : Any=4 , __snake_case : Tuple=None , ):
lowerCamelCase :List[Any] = parent
lowerCamelCase :Union[str, Any] = batch_size
lowerCamelCase :Any = seq_length
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Any = use_input_mask
lowerCamelCase :Optional[int] = use_token_type_ids
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Union[str, Any] = embedding_size
lowerCamelCase :Union[str, Any] = hidden_size
lowerCamelCase :str = num_hidden_layers
lowerCamelCase :str = num_hidden_groups
lowerCamelCase :int = num_attention_heads
lowerCamelCase :int = intermediate_size
lowerCamelCase :str = hidden_act
lowerCamelCase :List[Any] = hidden_dropout_prob
lowerCamelCase :Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase :List[str] = max_position_embeddings
lowerCamelCase :Any = type_vocab_size
lowerCamelCase :Any = type_sequence_label_size
lowerCamelCase :str = initializer_range
lowerCamelCase :Optional[Any] = num_labels
lowerCamelCase :List[str] = num_choices
lowerCamelCase :Tuple = scope
def snake_case ( self : Optional[int] ):
lowerCamelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase :Tuple = None
if self.use_input_mask:
lowerCamelCase :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase :Tuple = None
if self.use_token_type_ids:
lowerCamelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase :Any = None
lowerCamelCase :Union[str, Any] = None
lowerCamelCase :Optional[Any] = None
if self.use_labels:
lowerCamelCase :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase :Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self : Any ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def snake_case ( self : Tuple , __snake_case : List[Any] , __snake_case : str , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict ):
lowerCamelCase :Dict = AlbertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowerCamelCase :Dict = model(_lowercase , token_type_ids=_lowercase )
lowerCamelCase :str = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case ( self : List[str] , __snake_case : str , __snake_case : str , __snake_case : List[str] , __snake_case : int , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Union[str, Any] ):
lowerCamelCase :Optional[int] = AlbertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :Dict = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def snake_case ( self : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : Optional[int] , __snake_case : int ):
lowerCamelCase :Optional[Any] = AlbertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :Dict = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : str , __snake_case : Any ):
lowerCamelCase :Optional[Any] = AlbertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self : Tuple , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Any , __snake_case : Optional[Any] ):
lowerCamelCase :Dict = self.num_labels
lowerCamelCase :Union[str, Any] = AlbertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : List[str] ):
lowerCamelCase :List[Any] = self.num_labels
lowerCamelCase :Optional[Any] = AlbertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :List[Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self : List[Any] , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Any , __snake_case : int , __snake_case : int , __snake_case : Any ):
lowerCamelCase :List[Any] = self.num_choices
lowerCamelCase :Tuple = AlbertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase :Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase :str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase :Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase :List[Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self : List[str] ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
(
lowerCamelCase
) :int = config_and_inputs
lowerCamelCase :Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
_UpperCAmelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Dict=False ):
lowerCamelCase :List[Any] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
lowerCamelCase :Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
lowerCamelCase :Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def snake_case ( self : List[str] ):
lowerCamelCase :List[str] = AlbertModelTester(self )
lowerCamelCase :str = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def snake_case ( self : int ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def snake_case ( self : Dict ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def snake_case ( self : Dict ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def snake_case ( self : str ):
lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase :Tuple = type
self.model_tester.create_and_check_model(*_lowercase )
@slow
def snake_case ( self : List[Any] ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = AlbertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = AlbertModel.from_pretrained('''albert-base-v2''' )
lowerCamelCase :int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase :Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase :List[Any] = model(_lowercase , attention_mask=_lowercase )[0]
lowerCamelCase :str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowercase )
lowerCamelCase :Tuple = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1e-4 ) )
| 707
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
A__ = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
A__ = {
"""ctrl""": 256,
}
A__ = {
"""Pregnancy""": 168_629,
"""Christianity""": 7_675,
"""Explain""": 106_423,
"""Fitness""": 63_440,
"""Saving""": 63_163,
"""Ask""": 27_171,
"""Ass""": 95_985,
"""Joke""": 163_509,
"""Questions""": 45_622,
"""Thoughts""": 49_605,
"""Retail""": 52_342,
"""Feminism""": 164_338,
"""Writing""": 11_992,
"""Atheism""": 192_263,
"""Netflix""": 48_616,
"""Computing""": 39_639,
"""Opinion""": 43_213,
"""Alone""": 44_967,
"""Funny""": 58_917,
"""Gaming""": 40_358,
"""Human""": 4_088,
"""India""": 1_331,
"""Joker""": 77_138,
"""Diet""": 36_206,
"""Legal""": 11_859,
"""Norman""": 4_939,
"""Tip""": 72_689,
"""Weight""": 52_343,
"""Movies""": 46_273,
"""Running""": 23_425,
"""Science""": 2_090,
"""Horror""": 37_793,
"""Confession""": 60_572,
"""Finance""": 12_250,
"""Politics""": 16_360,
"""Scary""": 191_985,
"""Support""": 12_654,
"""Technologies""": 32_516,
"""Teenage""": 66_160,
"""Event""": 32_769,
"""Learned""": 67_460,
"""Notion""": 182_770,
"""Wikipedia""": 37_583,
"""Books""": 6_665,
"""Extract""": 76_050,
"""Confessions""": 102_701,
"""Conspiracy""": 75_932,
"""Links""": 63_674,
"""Narcissus""": 150_425,
"""Relationship""": 54_766,
"""Relationships""": 134_796,
"""Reviews""": 41_671,
"""News""": 4_256,
"""Translation""": 26_820,
"""multilingual""": 128_406,
}
def _lowerCamelCase ( a_ : Optional[int]):
lowerCamelCase :Tuple = set()
lowerCamelCase :Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCamelCase :Any = char
lowerCamelCase :Union[str, Any] = set(__A)
return pairs
class _lowerCAmelCase ( __A ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTROL_CODES
def __init__( self : Dict , __snake_case : Tuple , __snake_case : int , __snake_case : Dict="<unk>" , **__snake_case : Tuple ):
super().__init__(unk_token=__snake_case , **__snake_case )
with open(__snake_case , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase :List[Any] = json.load(__snake_case )
lowerCamelCase :Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(__snake_case , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase :Tuple = merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase :List[str] = [tuple(merge.split() ) for merge in merges]
lowerCamelCase :Tuple = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Dict = {}
@property
def snake_case ( self : Optional[Any] ):
return len(self.encoder )
def snake_case ( self : Optional[int] ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case ( self : str , __snake_case : Optional[int] ):
if token in self.cache:
return self.cache[token]
lowerCamelCase :Dict = tuple(__snake_case )
lowerCamelCase :Any = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCamelCase :Dict = get_pairs(__snake_case )
if not pairs:
return token
while True:
lowerCamelCase :List[Any] = min(__snake_case , key=lambda __snake_case : self.bpe_ranks.get(__snake_case , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase :Tuple = bigram
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :Dict = 0
while i < len(__snake_case ):
try:
lowerCamelCase :List[Any] = word.index(__snake_case , __snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase :int = j
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase :int = tuple(__snake_case )
lowerCamelCase :Optional[Any] = new_word
if len(__snake_case ) == 1:
break
else:
lowerCamelCase :int = get_pairs(__snake_case )
lowerCamelCase :List[str] = '''@@ '''.join(__snake_case )
lowerCamelCase :Optional[Any] = word[:-4]
lowerCamelCase :Union[str, Any] = word
return word
def snake_case ( self : int , __snake_case : List[str] ):
lowerCamelCase :int = []
lowerCamelCase :Any = re.findall(R'''\S+\n?''' , __snake_case )
for token in words:
split_tokens.extend(list(self.bpe(__snake_case ).split(''' ''' ) ) )
return split_tokens
def snake_case ( self : int , __snake_case : str ):
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def snake_case ( self : Tuple , __snake_case : List[str] ):
return self.decoder.get(__snake_case , self.unk_token )
def snake_case ( self : Optional[Any] , __snake_case : List[str] ):
lowerCamelCase :Optional[Any] = ''' '''.join(__snake_case ).replace('''@@ ''' , '''''' ).strip()
return out_string
def snake_case ( self : List[Any] , __snake_case : str , __snake_case : Optional[str] = None ):
if not os.path.isdir(__snake_case ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase :Union[str, Any] = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Union[str, Any] = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '''\n''' )
lowerCamelCase :Dict = 0
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __snake_case : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase :List[str] = token_index
writer.write(''' '''.join(__snake_case ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 0
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , *__snake_case : str , __snake_case : Any=None , __snake_case : Any=None , **__snake_case : Union[str, Any] ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
lowerCamelCase :List[str] = eval_examples
lowerCamelCase :Optional[Any] = post_process_function
def snake_case ( self : List[str] , __snake_case : Dict = None , __snake_case : List[str]=None , __snake_case : List[Any] = None , __snake_case : List[Any] = "eval" , **__snake_case : Optional[int] , ):
lowerCamelCase :Optional[int] = gen_kwargs.copy()
lowerCamelCase :List[Any] = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowerCamelCase :List[str] = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowerCamelCase :str = gen_kwargs
lowerCamelCase :Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase :str = self.get_eval_dataloader(_lowerCAmelCase )
lowerCamelCase :List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase :str = self.compute_metrics
lowerCamelCase :Any = None
lowerCamelCase :Optional[int] = time.time()
lowerCamelCase :Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase :str = eval_loop(
_lowerCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
lowerCamelCase :Dict = compute_metrics
lowerCamelCase :str = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase :Union[str, Any] = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase :str = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
lowerCamelCase :Dict = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
else:
lowerCamelCase :Optional[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase :Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCAmelCase )
return metrics
def snake_case ( self : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any]=None , __snake_case : Dict = "test" , **__snake_case : str ):
lowerCamelCase :Optional[Any] = gen_kwargs.copy()
lowerCamelCase :Any = self.get_test_dataloader(_lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase :str = self.compute_metrics
lowerCamelCase :Optional[int] = None
lowerCamelCase :Union[str, Any] = time.time()
lowerCamelCase :str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase :str = eval_loop(
_lowerCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
lowerCamelCase :List[str] = compute_metrics
lowerCamelCase :Dict = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase :int = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , '''predict''' )
lowerCamelCase :Tuple = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
lowerCamelCase :int = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCAmelCase )
| 709
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 0
|
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : List[str]):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''')
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''')
lowerCamelCase :Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__))
return round(lowerCamelCase__ , ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
def _lowerCamelCase ( a_ : int , a_ : bool = False):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''')
# array bounds provided by analysis
lowerCamelCase :Any = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
lowerCamelCase :Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCAmelCase , 1):
if n < _p:
# then we have our last prime to check
lowerCamelCase :str = primes[:idx]
break
lowerCamelCase :List[str] = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase :int = False
for r in range(_lowerCAmelCase):
lowerCamelCase :Any = pow(_lowerCAmelCase , d * 2**r , _lowerCAmelCase)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase :Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _lowerCamelCase ( ):
assert not miller_rabin(5_61)
assert miller_rabin(5_63)
# 2047
assert not miller_rabin(83_82_01)
assert miller_rabin(83_82_07)
# 1_373_653
assert not miller_rabin(17_31_60_01)
assert miller_rabin(17_31_60_17)
# 25_326_001
assert not miller_rabin(30_78_38_66_41)
assert miller_rabin(30_78_38_66_53)
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01)
assert miller_rabin(1_71_30_45_57_48_19)
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07)
assert miller_rabin(2_77_97_99_72_83_27)
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41)
assert miller_rabin(1_13_85_00_23_90_95_27)
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51)
assert miller_rabin(1_27_50_41_01_88_48_80_43_91)
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67)
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51)
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33)
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59)
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 711
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowerCamelCase ( a_ : Optional[int] , a_ : Optional[int]=False):
try:
lowerCamelCase :Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCamelCase :List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowerCamelCase :Tuple = strtobool(__lowerCAmelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no.")
return _value
A__ = parse_flag_from_env("""RUN_SLOW""", default=False)
def _lowerCamelCase ( a_ : Any):
return unittest.skip('''Test was skipped''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Optional[Any]):
return unittest.skipUnless(_run_slow_tests , '''test is slow''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Dict):
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Union[str, Any]):
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Union[str, Any]):
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : List[str]):
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : int):
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Union[str, Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Optional[int]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Any):
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Dict):
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Union[str, Any]):
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Any):
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Any):
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''') , '''test requires torch version >= 1.12.0''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Any=None , a_ : Any=None):
if test_case is None:
return partial(__lowerCAmelCase , version=__lowerCAmelCase)
return unittest.skipUnless(is_torch_version('''>=''' , __lowerCAmelCase) , F"test requires torch version >= {version}")(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Tuple):
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : Tuple):
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''')(__lowerCAmelCase)
def _lowerCamelCase ( a_ : str):
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''')(__lowerCAmelCase)
A__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _lowerCamelCase ( a_ : Tuple):
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__lowerCAmelCase)
class _lowerCAmelCase ( unittest.TestCase ):
_UpperCAmelCase = True
@classmethod
def snake_case ( cls : Optional[Any] ):
lowerCamelCase :int = tempfile.mkdtemp()
@classmethod
def snake_case ( cls : Union[str, Any] ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def snake_case ( self : str ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_a )
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Optional[Any] ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Optional[Any] , __snake_case : Dict ):
lowerCamelCase :Union[str, Any] = mocks if isinstance(_a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _lowerCamelCase ( a_ : Any):
lowerCamelCase :Optional[int] = AcceleratorState()
lowerCamelCase :Union[str, Any] = tensor[None].clone().to(state.device)
lowerCamelCase :Optional[Any] = gather(__lowerCAmelCase).cpu()
lowerCamelCase :List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , __lowerCAmelCase):
return False
return True
class _lowerCAmelCase :
def __init__( self : List[str] , __snake_case : Tuple , __snake_case : int , __snake_case : List[str] ):
lowerCamelCase :Tuple = returncode
lowerCamelCase :List[Any] = stdout
lowerCamelCase :Tuple = stderr
async def _lowerCamelCase ( a_ : int , a_ : List[Any]):
while True:
lowerCamelCase :Optional[int] = await stream.readline()
if line:
callback(__lowerCAmelCase)
else:
break
async def _lowerCamelCase ( a_ : List[Any] , a_ : Tuple=None , a_ : List[Any]=None , a_ : str=None , a_ : Tuple=False , a_ : Union[str, Any]=False):
if echo:
print('''\nRunning: ''' , ''' '''.join(__lowerCAmelCase))
lowerCamelCase :Any = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowerCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowerCamelCase :Optional[int] = []
lowerCamelCase :Union[str, Any] = []
def tee(a_ : Optional[Any] , a_ : List[Any] , a_ : Optional[Any] , a_ : Any=""):
lowerCamelCase :int = line.decode('''utf-8''').rstrip()
sink.append(__lowerCAmelCase)
if not quiet:
print(__lowerCAmelCase , __lowerCAmelCase , file=__lowerCAmelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda a_: tee(__lowerCAmelCase , __lowerCAmelCase , sys.stdout , label='''stdout:'''))),
asyncio.create_task(_read_stream(p.stderr , lambda a_: tee(__lowerCAmelCase , __lowerCAmelCase , sys.stderr , label='''stderr:'''))),
] , timeout=__lowerCAmelCase , )
return _RunOutput(await p.wait() , __lowerCAmelCase , __lowerCAmelCase)
def _lowerCamelCase ( a_ : List[Any] , a_ : Union[str, Any]=None , a_ : List[Any]=None , a_ : Optional[Any]=1_80 , a_ : List[str]=False , a_ : Any=True):
lowerCamelCase :Optional[int] = asyncio.get_event_loop()
lowerCamelCase :Union[str, Any] = loop.run_until_complete(
_stream_subprocess(__lowerCAmelCase , env=__lowerCAmelCase , stdin=__lowerCAmelCase , timeout=__lowerCAmelCase , quiet=__lowerCAmelCase , echo=__lowerCAmelCase))
lowerCamelCase :Any = """ """.join(__lowerCAmelCase)
if result.returncode > 0:
lowerCamelCase :int = """\n""".join(result.stderr)
raise RuntimeError(
F"\'{cmd_str}\' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}")
return result
class _lowerCAmelCase ( UpperCamelCase_ ):
pass
def _lowerCamelCase ( a_ : List[Any] , a_ : Tuple=False):
try:
lowerCamelCase :Any = subprocess.check_output(__lowerCAmelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(__lowerCAmelCase , '''decode'''):
lowerCamelCase :Tuple = output.decode('''utf-8''')
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__lowerCAmelCase)}` failed with the following error:\n\n{e.output.decode()}") from e
| 712
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class _lowerCAmelCase :
def __init__( self : Optional[int] , __snake_case : int ):
lowerCamelCase :Any = value
lowerCamelCase :Tuple = None
lowerCamelCase :Optional[int] = None
class _lowerCAmelCase :
def __init__( self : int , __snake_case : Node ):
lowerCamelCase :Any = tree
def snake_case ( self : Optional[int] , __snake_case : Node | None ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : List[str] ):
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
A__ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def _lowerCAmelCase ( a_ : Tuple):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
lowerCamelCase :int = list(s_dict.keys())
for key in keys:
lowerCamelCase :List[Any] = R'''.*/layers_(\d+)'''
lowerCamelCase :int = key
if re.match(lowerCAmelCase__ , lowerCAmelCase__):
lowerCamelCase :Tuple = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , lowerCAmelCase__)
lowerCamelCase :List[str] = R'''(encoder|decoder)\/'''
if re.match(lowerCAmelCase__ , lowerCAmelCase__):
lowerCamelCase :str = re.match(lowerCAmelCase__ , lowerCAmelCase__).groups()
if groups[0] == "encoder":
lowerCamelCase :Dict = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , lowerCAmelCase__)
lowerCamelCase :Tuple = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , lowerCAmelCase__)
elif groups[0] == "decoder":
lowerCamelCase :List[str] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , lowerCAmelCase__)
lowerCamelCase :List[str] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , lowerCAmelCase__)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCamelCase :Dict = new_key.replace(lowerCAmelCase__ , lowerCAmelCase__)
print(F"{key} -> {new_key}")
lowerCamelCase :Optional[int] = s_dict.pop(lowerCAmelCase__)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase :List[Any] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase :List[str] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
lowerCamelCase :str = s_dict[key].shape[0]
lowerCamelCase :Optional[int] = s_dict[key]
for idx in range(lowerCAmelCase__):
lowerCamelCase :str = expert_weihts[idx]
print(F"{key} -> {key.replace('expert/' , 'nested fstring')}")
s_dict.pop(lowerCAmelCase__)
return s_dict
A__ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def _lowerCAmelCase ( a_ : Optional[Any] , a_ : Optional[Any]):
# Convert a google style config to the hugging face fromat
import regex as re
with open(lowerCAmelCase__ , '''r''') as f:
lowerCamelCase :Any = f.read()
lowerCamelCase :Optional[int] = re.findall(R'''(.*) = ([0-9.]*)''' , lowerCAmelCase__)
lowerCamelCase :List[str] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCamelCase :List[Any] = float(lowerCAmelCase__) if '''.''' in value else int(lowerCAmelCase__)
lowerCamelCase :Optional[int] = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , lowerCAmelCase__)[0]
lowerCamelCase :List[str] = str(activation[1])
lowerCamelCase :Optional[int] = num_experts
lowerCamelCase :int = SwitchTransformersConfig(**lowerCAmelCase__)
return config
def _lowerCAmelCase ( a_ : int , a_ : Optional[int] , a_ : Optional[int]=None , a_ : Tuple="./" , a_ : Dict=8):
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}")
lowerCamelCase :Optional[int] = checkpoints.load_tax_checkpoint(lowerCAmelCase__)
if gin_file is not None:
lowerCamelCase :Any = convert_gin_to_config(lowerCAmelCase__ , lowerCAmelCase__)
else:
lowerCamelCase :List[Any] = SwitchTransformersConfig.from_pretrained(lowerCAmelCase__)
lowerCamelCase :Any = SwitchTransformersForConditionalGeneration(lowerCAmelCase__)
lowerCamelCase :Tuple = flax_params['''target''']
lowerCamelCase :str = flatten_dict(lowerCAmelCase__ , sep='''/''')
lowerCamelCase :Optional[int] = rename_keys(lowerCAmelCase__)
lowerCamelCase :int = unflatten_dict(lowerCAmelCase__ , sep='''/''')
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase__ , lowerCAmelCase__)
print(F"Save PyTorch model to {pytorch_dump_path}")
pt_model.save_pretrained(lowerCAmelCase__)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
A__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _lowerCamelCase ( a_ : List[Any] , a_ : str , a_ : str = False):
if radian_mode:
return [magnitude * cos(__A), magnitude * sin(__A)]
return [magnitude * cos(radians(__A)), magnitude * sin(radians(__A))]
def _lowerCamelCase ( a_ : Dict , a_ : Dict , a_ : List[str] = 10**-1):
lowerCamelCase :NDArray[floataa] = cross(__A , __A)
lowerCamelCase :float = sum(__A)
return abs(__A) < eps
if __name__ == "__main__":
# Test to check if it works
A__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
A__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
A__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
A__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
A__ = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
A__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
| 49
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def _lowerCamelCase ( a_ : List[Any] , a_ : int , a_ : Dict):
lowerCamelCase :List[str] = state_dict.pop(_lowerCamelCase)
lowerCamelCase :Tuple = val
def _lowerCamelCase ( a_ : str):
lowerCamelCase :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCamelCase :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''')
lowerCamelCase :Dict = value
else:
lowerCamelCase :Any = value
return new_state_dict
def _lowerCamelCase ( a_ : Optional[int] , a_ : Tuple=False):
lowerCamelCase :Optional[Any] = ''
if is_panoptic:
lowerCamelCase :Optional[Any] = 'conditional_detr.'
# first: transformer encoder
for i in range(6):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase :Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
lowerCamelCase :Tuple = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Dict = in_proj_weight[:2_56, :]
lowerCamelCase :List[Any] = in_proj_bias[:2_56]
lowerCamelCase :Dict = in_proj_weight[2_56:5_12, :]
lowerCamelCase :Union[str, Any] = in_proj_bias[2_56:5_12]
lowerCamelCase :Any = in_proj_weight[-2_56:, :]
lowerCamelCase :Any = in_proj_bias[-2_56:]
def _lowerCamelCase ( ):
lowerCamelCase :Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase :Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[int]):
lowerCamelCase :List[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase :Dict = 'resnet101'
if "dc5" in model_name:
lowerCamelCase :List[Any] = True
lowerCamelCase :Dict = 'panoptic' in model_name
if is_panoptic:
lowerCamelCase :List[str] = 2_50
else:
lowerCamelCase :str = 91
lowerCamelCase :Any = 'huggingface/label-files'
lowerCamelCase :List[str] = 'coco-detection-id2label.json'
lowerCamelCase :int = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='''dataset''') , '''r'''))
lowerCamelCase :str = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowerCamelCase :Union[str, Any] = idalabel
lowerCamelCase :Any = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase :Optional[int] = 'coco_panoptic' if is_panoptic else 'coco_detection'
lowerCamelCase :str = ConditionalDetrImageProcessor(format=_lowerCamelCase)
# prepare image
lowerCamelCase :List[str] = prepare_img()
lowerCamelCase :int = image_processor(images=_lowerCamelCase , return_tensors='''pt''')
lowerCamelCase :Dict = encoding['pixel_values']
logger.info(F"Converting model {model_name}...")
# load original model from torch hub
lowerCamelCase :Any = torch.hub.load('''DeppMeng/ConditionalDETR''' , _lowerCamelCase , pretrained=_lowerCamelCase).eval()
lowerCamelCase :Union[str, Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase :Optional[Any] = 'conditional_detr.' + src
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
lowerCamelCase :Union[str, Any] = rename_backbone_keys(_lowerCamelCase)
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase , is_panoptic=_lowerCamelCase)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase :Dict = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''')
and not key.startswith('''class_labels_classifier''')
and not key.startswith('''bbox_predictor''')
):
lowerCamelCase :List[Any] = state_dict.pop(_lowerCamelCase)
lowerCamelCase :Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase :Any = state_dict.pop(_lowerCamelCase)
lowerCamelCase :Dict = val
elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
continue
else:
lowerCamelCase :str = state_dict.pop(_lowerCamelCase)
lowerCamelCase :List[str] = val
else:
if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
lowerCamelCase :Dict = state_dict.pop(_lowerCamelCase)
lowerCamelCase :List[Any] = val
# finally, create HuggingFace model and load state dict
lowerCamelCase :str = ConditionalDetrForSegmentation(_lowerCamelCase) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
model.push_to_hub(repo_id=_lowerCamelCase , organization='''DepuMeng''' , commit_message='''Add model''')
# verify our conversion
lowerCamelCase :List[Any] = conditional_detr(_lowerCamelCase)
lowerCamelCase :Dict = model(_lowerCamelCase)
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4)
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4)
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4)
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
model.save_pretrained(_lowerCamelCase)
image_processor.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
A__ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _lowerCAmelCase ( _A ):
_UpperCAmelCase = 'vivit'
def __init__( self : str , __snake_case : Dict=224 , __snake_case : Union[str, Any]=32 , __snake_case : int=[2, 16, 16] , __snake_case : Dict=3 , __snake_case : Dict=768 , __snake_case : Dict=12 , __snake_case : Tuple=12 , __snake_case : Any=3072 , __snake_case : List[str]="gelu_fast" , __snake_case : Optional[Any]=0.0 , __snake_case : Tuple=0.0 , __snake_case : Dict=0.0_2 , __snake_case : Optional[Any]=1e-0_6 , __snake_case : Union[str, Any]=True , **__snake_case : str , ):
lowerCamelCase :Union[str, Any] = hidden_size
lowerCamelCase :str = num_hidden_layers
lowerCamelCase :int = num_attention_heads
lowerCamelCase :str = intermediate_size
lowerCamelCase :Tuple = hidden_act
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :Optional[int] = attention_probs_dropout_prob
lowerCamelCase :int = initializer_range
lowerCamelCase :List[Any] = layer_norm_eps
lowerCamelCase :Optional[Any] = image_size
lowerCamelCase :Union[str, Any] = num_frames
lowerCamelCase :Any = tubelet_size
lowerCamelCase :int = num_channels
lowerCamelCase :List[str] = qkv_bias
super().__init__(**__lowerCamelCase )
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _lowerCamelCase ( ):
lowerCamelCase :Optional[int] = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase :Optional[int] = Dataset.from_dict(a_)
return dataset
class _lowerCAmelCase ( _UpperCAmelCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :Any = get_dataset()
lowerCamelCase :int = make_duplicate_clusters(lowerCamelCase_ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def snake_case ( self : List[str] ):
lowerCamelCase :List[Any] = get_dataset()
lowerCamelCase :Optional[Any] = deduplicate_dataset(lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 2 )
print(lowerCamelCase_ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , lowerCamelCase_ )
| 718
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 0
|
import heapq
def _lowerCamelCase ( a_ : Optional[Any]):
lowerCamelCase :str = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_A , [-1 * len(_A), (key, value)])
# chosen_vertices = set of chosen vertices
lowerCamelCase :Tuple = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCamelCase :Any = heapq.heappop(_A)[1][0]
chosen_vertices.add(_A)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowerCamelCase :Optional[Any] = elem[1][1].index(_A)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_A)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 719
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
def _lowerCamelCase ( a_ : Union[str, Any] = 50_00_00_00):
lowerCamelCase :str = set()
lowerCamelCase :List[str] = int((limit - 24) ** (1 / 2))
lowerCamelCase :Union[str, Any] = set(range(3 , prime_square_limit + 1 , 2))
primes.add(2)
for p in range(3 , prime_square_limit + 1 , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , a_)))
for primea in primes:
lowerCamelCase :str = primea * primea
for primea in primes:
lowerCamelCase :Optional[Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCamelCase :Optional[int] = primea * primea * primea * primea
lowerCamelCase :List[str] = square + cube + tetr
if total >= limit:
break
ret.add(a_)
return len(a_)
if __name__ == "__main__":
print(F'{solution() = }')
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.