| code (string, lengths 82 to 53.2k) | code_codestyle (int64, 0 to 721) | style_context (string, lengths 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor( FeatureExtractionMixin ):
    '''simple docstring'''

    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ) -> None:
        """simple docstring"""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )

        super().__init__(**kwargs )
    def pad( self , processed_features: Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , padding: Union[bool, str, PaddingStrategy] = True , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
        """simple docstring"""
        # If we have a list of dicts, let's convert it in a dict of lists
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f""" to this method that includes {self.model_input_names[0]}, but you provided"""
                f""" {list(processed_features.keys() )}""" )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f"""type of {first_element} unknown: {type(first_element )}. """
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )

        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )

        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )

        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) -> dict:
        """simple docstring"""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['attention_mask'] = np.ones(len(required_input ) , dtype=np.int32 )

        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , 'constant' , constant_values=self.padding_value )
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )

        return processed_features
    def _truncate( self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , pad_to_multiple_of: Optional[int] = None , truncation: Optional[bool] = None , ):
        """simple docstring"""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input ) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]

        return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ):
        """simple docstring"""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
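# A minimal standalone sketch of the `pad_to_multiple_of` rounding used by
# `_pad` and `_truncate` above. The helper name is illustrative and not part
# of the extractor API: a target length is rounded up to the next multiple so
# padded tensor shapes stay hardware-friendly.
def _round_up_to_multiple(length: int, multiple: int) -> int:
    if length % multiple != 0:
        return ((length // multiple) + 1) * multiple
    return length

assert _round_up_to_multiple(17, 8) == 24
assert _round_up_to_multiple(16, 8) == 16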
| 70 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args ,'''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
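# Typical invocations once this entry point is installed as the `accelerate`
# console script (subcommand names come from the parsers registered above;
# the script name is illustrative):
#
#   accelerate config                   # interactive machine configuration
#   accelerate env                      # print environment info
#   accelerate launch train.py --arg    # run a script with the configured setup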
| 481 | 0 |
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList ( Generic[T] ):
    def __init__( self , directed : bool = True) -> None:
        self.adj_list : dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
    def __repr__( self ) -> str:
return pformat(self.adj_list)
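# A short usage sketch of the class reconstructed above (directed by default):
#
#   graph = GraphAdjacencyList()
#   graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)
#   print(graph)   # {0: [1, 2], 1: [2], 2: []}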
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : List[Any] = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig( PretrainedConfig ):
    model_type = 'lxmert'
    attribute_map = {}

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = num_qa_labels
lowercase_ = num_object_labels
lowercase_ = num_attr_labels
lowercase_ = l_layers
lowercase_ = x_layers
lowercase_ = r_layers
lowercase_ = visual_feat_dim
lowercase_ = visual_pos_dim
lowercase_ = visual_loss_normalizer
lowercase_ = task_matched
lowercase_ = task_mask_lm
lowercase_ = task_obj_predict
lowercase_ = task_qa
lowercase_ = visual_obj_loss
lowercase_ = visual_attr_loss
lowercase_ = visual_feat_loss
lowercase_ = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**__lowerCAmelCase)
| 461 | 0 |
class MaxFenwickTree :
    def __init__( self , size: int ) -> None:
        '''simple docstring'''
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next( index: int ) -> int:
        '''simple docstring'''
        return index | (index + 1)

    @staticmethod
    def get_prev( index: int ) -> int:
        '''simple docstring'''
        return (index & (index + 1)) - 1
    def update( self , index: int , value: int ) -> None:
        '''simple docstring'''
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , current_left_border , index )
            index = self.get_next(index )
    def query( self , left: int , right: int ) -> int:
        '''simple docstring'''
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
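# A short usage sketch (class and method names as reconstructed above):
#
#   tree = MaxFenwickTree(5)
#   tree.update(2, 7)
#   tree.update(4, 3)
#   tree.query(0, 5)   # range maximum over [0, 5) -> 7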
| 242 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 242 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about a document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['''image''', '''text''']
    outputs = ['''text''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )

        super().__init__(*args , **kwargs )

    def encode( self , document: "Image" , question: str ):
        """simple docstring"""
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='''pt''' ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode( self , outputs ):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(r'''<.*?>''' , '''''' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )

        return sequence["answer"]
| 714 |
from __future__ import annotations
def fractional_knapsack( value: list , weight: list , capacity: float ):
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )

    max_value: float = 0
    fractions: list = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
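# A quick usage sketch with illustrative numbers: three items worth
# [60, 100, 120] weighing [10, 20, 30] at capacity 50 yield a maximum value
# of 240.0, taking the last item at a 2/3 fraction:
#
#   print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))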
| 429 | 0 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(screen_name: str ) ->None:
    """simple docstring"""
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"...{len(alltweets )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 93 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[str] = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "mctct"

    def __init__( self , vocab_size=8065 , hidden_size=1536 , num_hidden_layers=36 , intermediate_size=6144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1e-5 , layerdrop=0.3 , hidden_act="relu" , initializer_range=0.02 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction="sum" , ctc_zero_infinity=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel )
        self.conv_stride = list(conv_stride )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) | 142 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
    """simple docstring"""
    def __init__( self ,**kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )

        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
        # No specific FOR_XXX available yet

    def __call__( self ,audios: Union[np.ndarray, bytes, str] ,**kwargs ):
        '''simple docstring'''
        return super().__call__(audios ,**kwargs )
    def _sanitize_parameters( self ,**kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self ,audio ,candidate_labels=None ,hypothesis_template="This is a sound of {}." ):
        '''simple docstring'''
        if isinstance(audio ,str ):
            if audio.startswith('http://' ) or audio.startswith('https://' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio ,'rb' ) as f:
                    audio = f.read()

        if isinstance(audio ,bytes ):
            audio = ffmpeg_read(audio ,self.feature_extractor.sampling_rate )

        if not isinstance(audio ,np.ndarray ):
            raise ValueError('We expect a numpy ndarray as input' )
        if len(audio.shape ) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )

        inputs = self.feature_extractor(
            [audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors='pt' )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences ,return_tensors=self.framework ,padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self ,model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] ,UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs ,**model_inputs )

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self ,model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.' )

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x: -x[0] )
        ]
        return result
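# A hedged usage sketch. The checkpoint name is illustrative: any CLAP-style
# model pairing an audio encoder with a text encoder should work here.
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])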
| 707 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """simple docstring"""

    def __init__( self ):
        '''simple docstring'''
        self.connections = {}

    def add_node( self ,node: str ):
        '''simple docstring'''
        self.connections[node] = {}

    def add_transition_probability( self ,node1: str ,node2: str ,probability: float ):
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability

    def get_nodes( self ):
        '''simple docstring'''
        return list(self.connections )

    def transition( self ,node: str ):
        '''simple docstring'''
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str , transitions: list[tuple[str, str, float]] , steps: int ):
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )

    visited = Counter(graph.get_nodes() )
    node = start

    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
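# Example run (transition probabilities are illustrative): a two-state chain
# sampled for 1000 steps, counting how often each state is visited.
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                  ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 1000))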
| 319 | 0 |
"""simple docstring"""
MOD_ADLER = 65_521


def adler32(plain_text: str ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 103 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
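# Usage via python-fire (script and file names are illustrative):
#
#   python to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#   python to_fp16.py pytorch_model.bin            # converts in place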
| 418 | 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_lowerCAmelCase = logging.get_logger(__name__)
class MarkupLMFeatureExtractor( FeatureExtractionMixin ):
    """simple docstring"""

    def __init__(self , **kwargs ):
        requires_backends(self , ["""bs4"""] )
        super().__init__(**kwargs )
    def xpath_soup(self , element ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self , html_string ):
        html_code = BeautifulSoup(html_string , """html.parser""" )

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag )

                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )

        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self , xpath_tags , xpath_subscripts ):
        xpath = """"""
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath
    def __call__(self , html_strings ) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must of type `str`, `List[str]` (batch of examples), """
                f"""but is of type {type(html_strings )}.""" )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"""nodes""": nodes, """xpaths""": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
| 480 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool( PipelineTool ):
    """simple docstring"""

    default_checkpoint = """facebook/bart-large-mnli"""
    description = (
        """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
        """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
        """It returns the most likely label in the list of provided `labels` for the input text."""
    )
    name = """text_classifier"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["""text""", ["""text"""]]
    outputs = ["""text"""]

    def setup(self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )

    def encode(self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )

    def decode(self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 480 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria( ABC ):
    """simple docstring"""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        '''simple docstring'''
        raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class MaxLengthCriteria( StoppingCriteria ):
    """simple docstring"""

    def __init__( self , max_length: int , max_position_embeddings: Optional[int] = None ) -> None:
        '''simple docstring'''
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        '''simple docstring'''
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class MaxNewTokensCriteria( StoppingCriteria ):
    """simple docstring"""

    def __init__( self , start_length: int , max_new_tokens: int ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            '''with `max_length = start_length + max_new_tokens` instead.''' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        '''simple docstring'''
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria( StoppingCriteria ):
    """simple docstring"""

    def __init__( self , max_time: float , initial_timestamp: Optional[float] = None ) -> None:
        '''simple docstring'''
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        '''simple docstring'''
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList( list ):
    """simple docstring"""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        '''simple docstring'''
        return any(criteria(input_ids , scores ) for criteria in self )

    @property
    def max_length( self ) -> Optional[int]:
        '''simple docstring'''
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
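# A hedged usage sketch: criteria compose through StoppingCriteriaList, and a
# generation loop stops as soon as any member returns True (model and inputs
# below are assumed to exist):
#
#   criteria = StoppingCriteriaList([
#       MaxLengthCriteria(max_length=20),
#       MaxTimeCriteria(max_time=5.0),
#   ])
#   model.generate(input_ids, stopping_criteria=criteria)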
def validate_stopping_criteria( stopping_criteria: StoppingCriteriaList , max_length: int ) -> StoppingCriteriaList:
    """simple docstring"""
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
| 534 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : List[Any] = logging.get_logger(__name__)
__a : List[Any] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''audio-spectrogram-transformer'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 534 | 1 |
'''simple docstring'''
import random
def _partition( data , pivot ):
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select( items , index ):
    """simple docstring"""
    if index >= len(items ) or index < 0:
        return None

    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller, equal, larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
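# Quick usage sketch: select the k-th smallest element (0-indexed) without
# fully sorting the list.
#
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)   # -> 54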
| 568 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig( UpperCamelCase_ ):
    model_type = """lxmert"""
    attribute_map = {}

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs )
| 568 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class a ( a__ ):
snake_case__ = '''unispeech-sat'''
def __init__( self , _snake_case=32 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=1E-5 , _snake_case="group" , _snake_case="gelu" , _snake_case=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _snake_case=(5, 2, 2, 2, 2, 2, 2) , _snake_case=(10, 3, 3, 3, 3, 2, 2) , _snake_case=False , _snake_case=1_28 , _snake_case=16 , _snake_case=False , _snake_case=True , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=3_20 , _snake_case=2 , _snake_case=0.1 , _snake_case=1_00 , _snake_case=2_56 , _snake_case=2_56 , _snake_case=0.1 , _snake_case="mean" , _snake_case=False , _snake_case=False , _snake_case=2_56 , _snake_case=(5_12, 5_12, 5_12, 5_12, 15_00) , _snake_case=(5, 3, 3, 1, 1) , _snake_case=(1, 2, 3, 1, 1) , _snake_case=5_12 , _snake_case=0 , _snake_case=1 , _snake_case=2 , _snake_case=5_04 , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
lowerCAmelCase = hidden_size
lowerCAmelCase = feat_extract_norm
lowerCAmelCase = feat_extract_activation
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = conv_bias
lowerCAmelCase = num_conv_pos_embeddings
lowerCAmelCase = num_conv_pos_embedding_groups
lowerCAmelCase = len(self.conv_dim )
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = feat_proj_dropout
lowerCAmelCase = final_dropout
lowerCAmelCase = layerdrop
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = vocab_size
lowerCAmelCase = num_clusters
lowerCAmelCase = do_stable_layer_norm
lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase = apply_spec_augment
lowerCAmelCase = mask_time_prob
lowerCAmelCase = mask_time_length
lowerCAmelCase = mask_time_min_masks
lowerCAmelCase = mask_feature_prob
lowerCAmelCase = mask_feature_length
lowerCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase = num_codevectors_per_group
lowerCAmelCase = num_codevector_groups
lowerCAmelCase = contrastive_logits_temperature
lowerCAmelCase = feat_quantizer_dropout
lowerCAmelCase = num_negatives
lowerCAmelCase = codevector_dim
lowerCAmelCase = proj_codevector_dim
lowerCAmelCase = diversity_loss_weight
# ctc loss
lowerCAmelCase = ctc_loss_reduction
lowerCAmelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = list(_snake_case )
lowerCAmelCase = xvector_output_dim
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 4 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 473 | 0 |
import math
def res(x , y ):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 45 |
def solution(length: int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
 | 52 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
 | 52 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 717 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
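    # Hedged usage example (added illustration): 4 cubic metres expressed in
    # litres via the table above, i.e. 4 * 1 * 1000.
    assert volume_conversion(4, "cubicmeter", "litre") == 4000.0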
| 392 | 0 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as coefficients in increasing degree, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
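    # Quick agreement check (added illustration): both evaluators compute
    # 5*x^2 + 9.3*x^3 + 7*x^4 at x = 10, roughly 79800.0, up to float rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6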
| 461 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`; we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes the accumulator on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
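# Hedged usage sketch (hypothetical toy data, kept as comments so the module
# stays import-safe): KeyDataset narrows a dict-style dataset down to one
# field before it is fed to a pipeline.
#
#   data = [{"text": "hello"}, {"text": "world"}]
#   texts = KeyDataset(data, "text")
#   texts[0]  # -> "hello"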
| 461 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
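# Hedged note (added): with a transformers dev checkout, a file like this is
# typically run through pytest; the exact path below is an assumption.
#
#   python -m pytest tests/models/vit_hybrid/test_modeling_vit_hybrid.py -k test_model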
| 254 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 254 | 1 |
'''simple docstring'''
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 28 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 279 | 0 |
"""simple docstring"""
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Optional[int] = logging.get_logger(__name__)
_a : List[str] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class _UpperCAmelCase ( BackboneConfigMixin , PretrainedConfig ):
    model_type = """convnextv2"""
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=2_24 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
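        # With the default depths [3, 3, 9, 3], stage_names is
        # ["stem", "stage1", "stage2", "stage3", "stage4"]; out_features/out_indices
        # are validated and aligned against exactly these names.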
| 87 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCamelCase( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def testGradientAccumulator( self ):
        '''simple docstring'''
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
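    # Note: the accumulator *sums* incoming gradients rather than averaging them,
    # which is why [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] above yields [-2.0, 5.0].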
    def testGradientAccumulatorDistributionStrategy( self ):
        '''simple docstring'''
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5e-5 , 1_0 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 47 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path , tmp_path ):
    '''simple docstring'''
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path , tmp_path ):
    '''simple docstring'''
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    '''simple docstring'''
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path , expected ):
    '''simple docstring'''
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_infos(path , expected_configs , expected_splits_in_first_config ):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path , expected_config , expected_splits ):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 446 | 0 |
'''simple docstring'''
def solution( n = 100 ) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
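    # Illustrative sanity check: the closed forms above agree with brute force.
    for check_n in range(1, 20):
        assert solution(check_n) == sum(range(1, check_n + 1)) ** 2 - sum(i * i for i in range(1, check_n + 1))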
| 35 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class a_ ( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ) -> Any:
        """simple docstring"""
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
    def layers( self ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("""_""" )}
def A__ ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
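            # e.g. attention_head_dim == 8 gives slice_size == 4, so attention is
            # computed in two half-sized chunks, roughly halving its peak memory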
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> Tuple:
        """simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
    def text2img_sd1_1( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_2( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_3( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        """simple docstring"""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_4( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> str:
        """simple docstring"""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[str]:
        """simple docstring"""
        device = """cuda""" if torch.cuda.is_available() else """cpu"""
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 35 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase : Any =ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase : Optional[Any] =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : str =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : Tuple =CLIPTextModel(UpperCAmelCase__ )
lowercase : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : List[Any] ={
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : List[str] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : List[str] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Optional[Any] =2
lowercase : Optional[int] =randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , )
lowercase : int =floats_tensor(control_image.shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Tuple =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
lowercase : Optional[int] ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __SCREAMING_SNAKE_CASE ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Optional[int] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowercase : Optional[int] =ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ )
torch.manual_seed(0 )
lowercase : Dict =ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ )
torch.manual_seed(0 )
lowercase : Optional[int] =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
lowercase : List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : Tuple =CLIPTextModel(UpperCAmelCase__ )
lowercase : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : str =MultiControlNetModel([controlneta, controlneta] )
lowercase : Tuple ={
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Optional[int] =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : Optional[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : List[Any] =2
lowercase : Dict =[
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ),
]
lowercase : List[str] =floats_tensor(control_image[0].shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : str =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Optional[Any] =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
lowercase : Any ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_dummy_components()
lowercase : Optional[int] =self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
lowercase : Dict =10.0
lowercase : str =4
lowercase : Dict =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =steps
lowercase : Dict =scale
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ )[0]
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : int =steps
lowercase : Union[str, Any] =scale
lowercase : int =pipe(**UpperCAmelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowercase : Optional[int] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : int =steps
lowercase : str =scale
lowercase : Tuple =pipe(**UpperCAmelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowercase : Optional[int] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Optional[Any] =steps
lowercase : int =scale
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.get_dummy_components()
lowercase : List[str] =self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCAmelCase__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self ):
        '''simple docstring'''
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9E-2
| 92 |
import requests
def send_slack_message( message_body : str , slack_url : str ) -> None:
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg )
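# For reference, a standard Slack incoming webhook expects exactly this shape:
# POST <SLACK CHANNEL URL> with header "Content-Type: application/json" and body
# {"text": "<YOUR MESSAGE BODY>"}.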
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 477 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
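    # _LazyModule swaps itself in via sys.modules, so the torch-backed classes listed
    # in _import_structure are only imported the first time they are accessed.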
| 719 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data: int ) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F"Enter the left node of {node_found.data}: "
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F"Enter the right node of {node_found.data}: "
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=''',''' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=''',''' )
    in_order(node.right )
def post_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=''',''' )
def level_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=''',''' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=''',''' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=''',''' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=''',''' )
        n = n.right
def post_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=''',''' )
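# Illustrative trace of the two-stack scheme above on the tree 1 -> (2, 3):
# stack1 pops 1, 3, 2 while stack2 collects [1, 3, 2]; emptying stack2 then
# prints 2,3,1 -- left subtree, right subtree, root.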
def A__ ( SCREAMING_SNAKE_CASE_ = "" , SCREAMING_SNAKE_CASE_=5_0 , SCREAMING_SNAKE_CASE_="*" ) -> str:
if not s:
return "\n" + width * char
lowerCamelCase , lowerCamelCase : Dict =divmod(width - len(SCREAMING_SNAKE_CASE_ ) - 2 , 2 )
return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 262 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_lowerCAmelCase = 0B10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_lowerCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
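# e.g. bin(0b101)[2:] == "101" -> [1, 0, 1]; the 48-bit message above therefore
# becomes a list of 48 ints for WatermarkEncoder to embed via DWT-DCT.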
class _SCREAMING_SNAKE_CASE :
    def __init__( self : List[Any] ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('''bits''' , self.watermark )
    def apply_watermark( self , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , '''dwtDct''' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 432 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( CLIPPreTrainedModel ):
    def __init__( self : int , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward( self : Tuple , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper( nn.Module ):
    def __init__( self : Any , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='''gelu''' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward( self : List[str] , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
| 432 | 1 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __snake_case ( SageMakerConfig ):
a__ = ComputeEnvironment.AMAZON_SAGEMAKER
a__ = True
a__ = """ml.p3.2xlarge"""
a__ = """accelerate_sagemaker_execution_role"""
a__ = """hf-sm"""
a__ = """us-east-1"""
a__ = 1
a__ = """accelerate-sagemaker-1"""
a__ = """1.6"""
a__ = """4.4"""
a__ = """train.py"""
a__ = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
a__ = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __snake_case ( unittest.TestCase ):
    def test_args_convert( self) -> Tuple:
        '''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['model_name_or_path'] , str)
        assert isinstance(converted_args['do_train'] , bool)
        assert isinstance(converted_args['epochs'] , int)
        assert isinstance(converted_args['learning_rate'] , float)
        assert isinstance(converted_args['max_steps'] , float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
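        # Expected shape of a successful conversion (illustrative): ["--epochs", "3",
        # "--learning_rate", "5e-5"] -> {"epochs": 3, "learning_rate": 5e-5}, with
        # string values cast to the types asserted above.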
| 718 | """simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc , hole_conc , intrinsic_conc , ) ->tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
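    # Illustrative call (hypothetical values): with two known concentrations the
    # third follows from the mass-action law electron_conc * hole_conc == intrinsic_conc**2.
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))  # ('intrinsic_conc', 50.0)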
| 217 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
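# Note: assigning emb.weight.data replaces the Linear's weight tensor outright, so the
# returned layer maps hidden states to vocabulary logits using the (tied) embedding matrix.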
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    args = Namespace(**checkpoint['''cfg''']['''model'''] )
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : Optional[int] = parser.parse_args()
lowercase__ : Optional[int] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 123 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    batch_params = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
def _snake_case ( self : Optional[Any] ):
return 32
@property
def _snake_case ( self : List[Any] ):
return 32
@property
def _snake_case ( self : List[Any] ):
return self.time_input_dim
@property
def _snake_case ( self : Any ):
return self.time_input_dim * 4
@property
def _snake_case ( self : Any ):
return 100
@property
def _snake_case ( self : str ):
snake_case_ : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _snake_case ( self : Dict ):
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
snake_case_ : Tuple = MultilingualCLIP(lowercase_ )
snake_case_ : List[Any] = text_encoder.eval()
return text_encoder
@property
def _snake_case ( self : int ):
torch.manual_seed(0 )
snake_case_ : Dict = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case_ : Union[str, Any] = UNetaDConditionModel(**lowercase_ )
return model
@property
def _snake_case ( self : int ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self : Optional[int] ):
torch.manual_seed(0 )
snake_case_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Optional[int] = self.dummy_tokenizer
snake_case_ : Any = self.dummy_unet
snake_case_ : Tuple = self.dummy_movq
snake_case_ : List[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase_ , )
snake_case_ : int = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _snake_case ( self : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=0 ):
snake_case_ : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ )
# create init_image
snake_case_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Any = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
snake_case_ : Tuple = np.ones((64, 64) , dtype=np.floataa )
snake_case_ : Dict = 0
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ : List[Any] = torch.manual_seed(lowercase_ )
else:
snake_case_ : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ : List[Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def _snake_case ( self : str ):
snake_case_ : Any = '''cpu'''
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : Tuple = self.pipeline_class(**lowercase_ )
snake_case_ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : str = pipe(**self.get_dummy_inputs(lowercase_ ) )
snake_case_ : int = output.images
snake_case_ : Tuple = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
snake_case_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
snake_case_ : str = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _snake_case ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 123 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 512}
class BertGenerationTokenizer(PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
prefix_tokens: List[int] = []
model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    # Add extra_ids to the special token list
    super().__init__(
        bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
    self.vocab_file = vocab_file
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(vocab_file )
@property
def vocab_size( self ):
    return self.sp_model.get_piece_size()
def get_vocab( self ):
    vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
    vocab.update(self.added_tokens_encoder )
    return vocab
def __getstate__( self ):
    state = self.__dict__.copy()
    state['sp_model'] = None
    return state
def __setstate__( self , d ):
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , 'sp_model_kwargs' ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(self.vocab_file )
def _tokenize( self , text: str ):
    return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ):
    return self.sp_model.piece_to_id(token )
def _convert_id_to_token( self , index ):
    token = self.sp_model.IdToPiece(index )
    return token
def convert_tokens_to_string( self , tokens ):
    current_sub_tokens = []
    out_string = ''
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            out_string += self.sp_model.decode(current_sub_tokens ) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token )
    out_string += self.sp_model.decode(current_sub_tokens )
    return out_string.strip()
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
    if not os.path.isdir(save_directory ):
        logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
| 317 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin ):
"""simple docstring"""
attributes = ["""image_processor""", """feature_extractor"""]
image_processor_class = """TvltImageProcessor"""
feature_extractor_class = """TvltFeatureExtractor"""
def __init__( self , image_processor , feature_extractor ):
    super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
    self.image_processor = image_processor
    self.feature_extractor = feature_extractor
def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
    if images is None and audio is None:
        raise ValueError('You need to specify either an `images` or `audio` input to process.' )
    images_mixed_dict = None
    if images is not None:
        images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
    if images_mixed is not None:
        images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
    if audio is not None:
        audio_dict = self.feature_extractor(
            audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
    output_dict = {}
    if audio is not None:
        output_dict.update(audio_dict )
    if images is not None:
        output_dict.update(images_dict )
    if images_mixed_dict is not None:
        output_dict.update(images_mixed_dict )
    return output_dict
@property
def model_input_names( self ):
    image_processor_input_names = self.image_processor.model_input_names
    feature_extractor_input_names = self.feature_extractor.model_input_names
    return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 317 | 1 |
'''simple docstring'''
from math import ceil
def assert_device_map( device_map , num_blocks ):
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks ) )
def get_device_map( n_layers , devices ):
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
| 452 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig ):
    model_type = '''poolformer'''
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def atol_for_validation( self ):
"""simple docstring"""
return 2E-3 | 137 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_ernie"""] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 235 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@property
def gpu_provider( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def gpu_options( self ):
    """simple docstring"""
    options = ort.SessionOptions()
    options.enable_mem_pattern = False
    return options
def test_inpainting( self ):
    """simple docstring"""
    init_image = load_image(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
        """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
    mask_image = load_image(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
        """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
    expected_image = load_numpy(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
        """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
    # using the PNDM scheduler by default
    pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
        """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
    pipe.set_progress_bar_config(disable=None )
    prompt = """A red cat sitting on a park bench"""
    generator = np.random.RandomState(0 )
    output = pipe(
        prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="""np""" , )
    image = output.images[0]
    assert image.shape == (512, 512, 3)
    assert np.abs(expected_image - image ).max() < 1e-2
| 235 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 200_0000 ):
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 4 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
    """simple docstring"""
    if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
        raise ValueError(_WARNING )
    if os.name == "nt":
        raise NotImplementedError("This metric is currently not supported on Windows." )
    with ThreadPoolExecutor(max_workers=num_workers ) as executor:
        futures = []
        completion_id = Counter()
        n_samples = 0
        results = defaultdict(list )
        for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
            for candidate in candidates:
                test_program = candidate + "\n" + test_case
                args = (test_program, timeout, task_id, completion_id[task_id])
                future = executor.submit(check_correctness , *args )
                futures.append(future )
                completion_id[task_id] += 1
                n_samples += 1
        for future in as_completed(futures ):
            result = future.result()
            results[result["task_id"]].append((result["completion_id"], result) )
    total, correct = [], []
    for result in results.values():
        result.sort()
        passed = [r[1]["passed"] for r in result]
        total.append(len(passed ) )
        correct.append(sum(passed ) )
    total = np.array(total )
    correct = np.array(correct )
    ks = k
    pass_at_k = {F'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
    return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int , c: int , k: int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
| 572 | 0 |
"""simple docstring"""
from itertools import count
def solution( min_block_length = 50 ):
    """simple docstring"""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs( location = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
        job_title = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
        company_name = job.find("""span""" , {"""class""": """company"""} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 74 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message ):
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    """simple docstring"""
    return x[0]
def get_frequency_order( message ):
    """simple docstring"""
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    sorted_freq_to_letter_str = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter_str )
def english_freq_match_score( message ):
    """simple docstring"""
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod() | 623 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
a_ : List[str] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
a_ : Any = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
a_ : str = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            "Number of resulting singleton clusters in the key "
            F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            "files, respectively" )
    return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
        logger.info(
            name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {fa * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F"""CoNLL score: {conll:.2f}""" )
        output_scores.update({"conll_score": conll} )
    return output_scores
def check_gold_parse_annotation( key_lines ):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
"""simple docstring"""
def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def _compute(self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
    '''simple docstring'''
    metrics = [
        ("mentions", evaluator.mentions),
        ("muc", evaluator.muc),
        ("bcub", evaluator.b_cubed),
        ("ceafe", evaluator.ceafe),
        ("lea", evaluator.lea),
    ]
    if min_span:
        has_gold_parse = util.check_gold_parse_annotation(references )
        if not has_gold_parse:
            raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
        # util.parse_key_file(key_file)
        # key_file = key_file + ".parsed"
    score = evaluate(
        key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
    return score
return score | 623 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Any = torch.device("cpu")
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ):
    """simple docstring"""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ):
    """simple docstring"""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
        if ".dwconv" in k:
            k_new = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
        if ".Proj." in k:
            k_new = k_new.replace('''.Proj.''' , '''.proj.''' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            ls = k_new.split('''.''' )
            if ls[2].isdigit():
                k_new = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
            else:
                k_new = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    """simple docstring"""
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='''cpu''' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='''cpu''' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    inputs = processor(images=image , return_tensors='''pt''' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['''pixel_values'''] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 713 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeq2SeqDataset,
Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer ):
    mode = '''summarization'''
    loss_names = ['''loss''']
    metric_names = ROUGE_KEYS
    default_val_metric = '''rouge2'''
    def __init__( self , hparams , **kwargs ):
        """simple docstring"""
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''')
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''')
        super().__init__(hparams , num_labels=None , mode=self.mode , **kwargs)
        use_task_specific_params(self.model , '''summarization''')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / '''metrics.json'''
        self.hparams_save_path = Path(self.output_dir) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        self.dataset_kwargs = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
        assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()['''repo_sha''']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''') else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch( self , batch ):
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch , Path(self.output_dir) / '''text_batch.json''')
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir) / '''tok_batch.json''')
        self.already_saved_batch = True
        return readable_batch
    def forward( self , input_ids , **kwargs ):
        """simple docstring"""
        return self.model(input_ids , **kwargs)
    def ids_to_clean_text( self , generated_ids ):
        """simple docstring"""
        gen_text = self.tokenizer.batch_decode(
            generated_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True)
        return lmap(str.strip , gen_text)
    def _step( self , batch ):
        """simple docstring"""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['''input_ids'''], batch['''attention_mask''']
        tgt_ids = batch['''labels''']
        if isinstance(self.model , T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids , pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch['''decoder_input_ids'''] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids , attention_mask=src_mask , decoder_input_ids=decoder_input_ids , use_cache=False)
        lm_logits = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1]) , tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits , dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs , tgt_ids , self.hparams.label_smoothing , ignore_index=pad_token_id)
        return (loss,)
    @property
    def pad( self ):
        """simple docstring"""
        return self.tokenizer.pad_token_id
    def training_step( self , batch , batch_idx ):
        """simple docstring"""
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names , loss_tensors))
        # tokens per batch
        logs['''tpb'''] = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
        logs['''bs'''] = batch['''input_ids'''].shape[0]
        logs['''src_pad_tok'''] = batch['''input_ids'''].eq(self.pad).sum()
        logs['''src_pad_frac'''] = batch['''input_ids'''].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step( self , batch , batch_idx ):
        """simple docstring"""
        return self._generative_step(batch)
    def validation_epoch_end( self , outputs , prefix="val" ):
        """simple docstring"""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses['''loss''']
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
        all_metrics['''step_count'''] = self.step_count
        self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path
        preds = flatten_list([x['''preds'''] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            F"""{prefix}_loss""": loss,
            F"""{prefix}_{self.val_metric}""": metric_tensor,
        }
    def calc_generative_metrics( self , preds , target ):
        """simple docstring"""
        return calculate_rouge(preds , target)
    def _generative_step( self , batch ):
        """simple docstring"""
        ta = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=True , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        gen_time = (time.time() - ta) / batch['''input_ids'''].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch['''labels'''])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names , loss_tensors))
        rouge = self.calc_generative_metrics(preds , target)
        summ_len = np.mean(lmap(len , preds))
        base_metrics.update(gen_time=gen_time , gen_len=summ_len , preds=preds , target=target , **rouge)
        return base_metrics
    def test_step( self , batch , batch_idx ):
        """simple docstring"""
        return self._generative_step(batch)
    def test_epoch_end( self , outputs ):
        """simple docstring"""
        return self.validation_epoch_end(outputs , prefix='''test''')
    def get_dataset( self , type_path ):
        """simple docstring"""
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer , type_path=type_path , n_obs=n_obs , max_target_length=max_target_length , **self.dataset_kwargs , )
        return dataset
    def get_dataloader( self , type_path , batch_size , shuffle=False ):
        """simple docstring"""
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size , distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=False , num_workers=self.num_workers , sampler=sampler , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset , batch_sampler=batch_sampler , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=shuffle , num_workers=self.num_workers , sampler=None , )
    def train_dataloader( self ):
        """simple docstring"""
        dataloader = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=True)
        return dataloader
    def val_dataloader( self ):
        """simple docstring"""
        return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size)
    def test_dataloader( self ):
        """simple docstring"""
        return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size)
@staticmethod
def lowerCamelCase ( _snake_case : Union[str, Any] , _snake_case : Optional[Any]):
"""simple docstring"""
BaseTransformer.add_model_specific_args(_snake_case , _snake_case)
add_generic_args(_snake_case , _snake_case)
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_snake_case , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''')
parser.add_argument('''--freeze_embeds''' , action='''store_true''')
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_snake_case)
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_snake_case)
parser.add_argument('''--max_tokens_per_batch''' , type=_snake_case , default=_snake_case)
parser.add_argument('''--logger_name''' , type=_snake_case , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''')
parser.add_argument('''--n_train''' , type=_snake_case , default=-1 , required=_snake_case , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_val''' , type=_snake_case , default=500 , required=_snake_case , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_test''' , type=_snake_case , default=-1 , required=_snake_case , help='''# examples. -1 means use all.''')
parser.add_argument(
'''--task''' , type=_snake_case , default='''summarization''' , required=_snake_case , help='''Task to run: summarization or translation.''')
parser.add_argument('''--label_smoothing''' , type=_snake_case , default=0.0 , required=_snake_case)
parser.add_argument('''--src_lang''' , type=_snake_case , default='''''' , required=_snake_case)
parser.add_argument('''--tgt_lang''' , type=_snake_case , default='''''' , required=_snake_case)
parser.add_argument('''--eval_beams''' , type=_snake_case , default=_snake_case , required=_snake_case)
parser.add_argument(
'''--val_metric''' , type=_snake_case , default=_snake_case , required=_snake_case , choices=['''bleu''', '''rouge2''', '''loss''', None])
parser.add_argument('''--eval_max_gen_length''' , type=_snake_case , default=_snake_case , help='''never generate more than n tokens''')
parser.add_argument('''--save_top_k''' , type=_snake_case , default=1 , required=_snake_case , help='''How many checkpoints to save''')
parser.add_argument(
'''--early_stopping_patience''' , type=_snake_case , default=-1 , required=_snake_case , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will affect it.'''
) , )
return parser
class __snake_case ( a ):
UpperCAmelCase__ : Tuple = '''translation'''
UpperCAmelCase__ : Union[str, Any] = ['''loss''']
UpperCAmelCase__ : List[str] = ['''bleu''']
UpperCAmelCase__ : Optional[Any] = '''bleu'''
def __init__( self : Tuple , _snake_case : int , **_snake_case : Any):
"""simple docstring"""
super().__init__(_snake_case , **_snake_case)
UpperCAmelCase_ = hparams.src_lang
UpperCAmelCase_ = hparams.tgt_lang
def lowerCamelCase ( self : List[str] , _snake_case : Tuple , _snake_case : List[str]):
"""simple docstring"""
return calculate_bleu(_snake_case , _snake_case)
def A (__A : Tuple , __A : Optional[Any]=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=__A )
check_output_dir(__A , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase_ = SummarizationModule(__A )
else:
UpperCAmelCase_ = TranslationModule(__A )
UpperCAmelCase_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
UpperCAmelCase_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ = os.environ.get('''WANDB_PROJECT''' , __A )
UpperCAmelCase_ = WandbLogger(name=model.output_dir.name , project=__A )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
UpperCAmelCase_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase_ = False
UpperCAmelCase_ = args.val_metric == '''loss'''
UpperCAmelCase_ = generic_train(
__A , __A , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __A ) , early_stopping_callback=__A , logger=__A , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=__A ) )
if checkpoints:
UpperCAmelCase_ = checkpoints[-1]
UpperCAmelCase_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
snake_case_ : Optional[Any] = pl.Trainer.add_argparse_args(parser)
snake_case_ : str = SummarizationModule.add_model_specific_args(parser, os.getcwd())
snake_case_ : Dict = parser.parse_args()
main(args)
| 169 | 0 |
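For reference, a minimal sketch of the "sortish" sampling idea the dataloader above relies on (chunk size and names are illustrative): shuffle indices globally, then sort within fixed-size chunks so each batch sees similarly sized sequences while the epoch order stays random.

```python
import random

def sortish_indices(lengths, chunk_size=64):
    # shuffle globally, then sort within chunks so batches contain similar lengths
    idx = list(range(len(lengths)))
    random.shuffle(idx)
    chunks = [idx[i:i + chunk_size] for i in range(0, len(idx), chunk_size)]
    return [j for chunk in chunks for j in sorted(chunk, key=lambda k: lengths[k])]

print(sortish_indices([5, 1, 9, 3, 7, 2], chunk_size=3))
```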
def _snake_case ( __snake_case ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _snake_case ( __snake_case ):
_UpperCamelCase = 0
_UpperCamelCase = len(lowerCamelCase_ ) # No of vertices in graph
_UpperCamelCase = [0] * n
_UpperCamelCase = [False] * n
def dfs(__snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = True
_UpperCamelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , id_ )
_UpperCamelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
_UpperCamelCase = min(low[at] , low[to] )
_UpperCamelCase = []
for i in range(lowerCamelCase_ ):
if not visited[i]:
dfs(lowerCamelCase_ , -1 , lowerCamelCase_ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
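A cleaned-up sketch of the bridge-finding routine above (Tarjan's low-link method) with descriptive names restored; an edge (at, to) is a bridge exactly when no back edge from `to`'s subtree reaches `at` or higher.

```python
def compute_bridges(graph):
    n = len(graph)
    ids = [0] * n        # discovery time of each vertex
    low = [0] * n        # lowest discovery time reachable from the subtree
    visited = [False] * n
    bridges, timer = [], [0]

    def dfs(at, parent):
        visited[at] = True
        low[at] = ids[at] = timer[0]
        timer[0] += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if ids[at] < low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # back edge: can reach `to`'s discovery time without (at, to)
                low[at] = min(low[at], ids[to])

    for i in range(n):
        if not visited[i]:
            dfs(i, -1)
    return bridges

print(compute_bridges({0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}))  # [(2, 3)]
```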
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
with open(UpperCAmelCase_ , encoding='utf-8') as input_file:
UpperCamelCase__ : List[str] = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
UpperCamelCase__ : List[Any] = input_file.read()
UpperCamelCase__ : Dict = regexp.search(UpperCAmelCase_)
return match
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
with open(UpperCAmelCase_ , encoding='utf-8') as input_file:
UpperCamelCase__ : Optional[int] = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL)
UpperCamelCase__ : Any = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase__ : Any = regexp.finditer(UpperCAmelCase_)
UpperCamelCase__ : List[str] = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : int = Path('./datasets')
UpperCamelCase__ : List[Any] = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase_)):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Tuple = Path('./datasets')
UpperCamelCase__ : Dict = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase_)):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 596 | 0 |
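The style checks above hinge on regex capture groups; a simplified, self-contained sketch of the `open(...)`-without-encoding check (the pattern is reduced for illustration, not the stricter one used in the tests):

```python
import re

# flag open(...) calls that do not pass an encoding argument
pattern = re.compile(r"open\((.*)\)")

def missing_encoding(line):
    match = pattern.search(line)
    return bool(match) and "encoding=" not in match.group(1)

print(missing_encoding("f = open('data.txt')"))                    # True
print(missing_encoding("f = open('data.txt', encoding='utf-8')"))  # False
```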
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : UNetaDModel , lowerCamelCase_ : UNetaDModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = value_function
SCREAMING_SNAKE_CASE : Dict = unet
SCREAMING_SNAKE_CASE : Dict = scheduler
SCREAMING_SNAKE_CASE : Any = env
SCREAMING_SNAKE_CASE : Optional[int] = env.get_dataset()
SCREAMING_SNAKE_CASE : List[Any] = {}
for key in self.data.keys():
try:
SCREAMING_SNAKE_CASE : int = self.data[key].mean()
except: # noqa: E722
pass
SCREAMING_SNAKE_CASE : Optional[Any] = {}
for key in self.data.keys():
try:
SCREAMING_SNAKE_CASE : Tuple = self.data[key].std()
except: # noqa: E722
pass
SCREAMING_SNAKE_CASE : Dict = env.observation_space.shape[0]
SCREAMING_SNAKE_CASE : Any = env.action_space.shape[0]
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_ , device=self.unet.device )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
'''simple docstring'''
for key, val in cond.items():
SCREAMING_SNAKE_CASE : List[Any] = val.clone()
return x_in
def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = x.shape[0]
SCREAMING_SNAKE_CASE : List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
SCREAMING_SNAKE_CASE : List[str] = torch.full((batch_size,) , lowerCamelCase_ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
SCREAMING_SNAKE_CASE : Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : str = torch.autograd.grad([y.sum()] , [x] )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler._get_variance(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(0.5 * posterior_variance )
SCREAMING_SNAKE_CASE : Tuple = model_std * grad
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : int = x.detach()
SCREAMING_SNAKE_CASE : Union[str, Any] = x + scale * grad
SCREAMING_SNAKE_CASE : Tuple = self.reset_xa(lowerCamelCase_ , lowerCamelCase_ , self.action_dim )
SCREAMING_SNAKE_CASE : int = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase_ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , predict_epsilon=lowerCamelCase_ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
SCREAMING_SNAKE_CASE : List[str] = self.reset_xa(lowerCamelCase_ , lowerCamelCase_ , self.action_dim )
SCREAMING_SNAKE_CASE : Tuple = self.to_torch(lowerCamelCase_ )
return x, y
def __call__( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=64 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Any=0.1 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.normalize(lowerCamelCase_ , """observations""" )
SCREAMING_SNAKE_CASE : Optional[int] = obs[None].repeat(lowerCamelCase_ , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = {0: self.to_torch(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , device=self.unet.device )
SCREAMING_SNAKE_CASE : List[str] = self.reset_xa(lowerCamelCase_ , lowerCamelCase_ , self.action_dim )
SCREAMING_SNAKE_CASE : str = self.to_torch(lowerCamelCase_ )
# run the diffusion process
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.run_diffusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# sort output trajectories by value
SCREAMING_SNAKE_CASE : List[Any] = y.argsort(0 , descending=lowerCamelCase_ ).squeeze()
SCREAMING_SNAKE_CASE : int = x[sorted_idx]
SCREAMING_SNAKE_CASE : List[Any] = sorted_values[:, :, : self.action_dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = actions.detach().cpu().numpy()
SCREAMING_SNAKE_CASE : str = self.de_normalize(lowerCamelCase_ , key="""actions""" )
# select the action with the highest value
if y is not None:
SCREAMING_SNAKE_CASE : int = 0
else:
# if we didn't run value guiding, select a random action
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randint(0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = denorm_actions[selected_index, 0]
return denorm_actions
| 79 |
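The core of the pipeline above is the value-gradient guidance step: nudge the sample along the gradient of a learned value estimate before denoising. A minimal standalone sketch, with `value_fn` and `scale` as illustrative stand-ins:

```python
import torch

def guided_step(x, value_fn, scale=0.1):
    # differentiate the value estimate w.r.t. the sample and step uphill
    x = x.detach().requires_grad_()
    grad = torch.autograd.grad(value_fn(x).sum(), x)[0]
    return (x + scale * grad).detach()

x = torch.randn(4, 8)
x = guided_step(x, lambda t: (t ** 2).mean(dim=1))
print(x.shape)
```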
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__UpperCAmelCase = logging.getLogger(__name__)
__UpperCAmelCase = """Hello world! cécé herlolip"""
__UpperCAmelCase = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig(
temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , map_location=lambda storage, loc: storage )
SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ )
original.eval()
SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
__UpperCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 79 | 1 |
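The conversion script above validates equivalence by a max-absolute-difference report plus an `allclose` gate; a compact sketch of that sanity-check pattern:

```python
import torch

def outputs_match(a, b, atol=1e-3):
    # report the worst-case discrepancy, then gate on a tolerance
    print("max abs diff: {:.2e}".format(torch.max(torch.abs(a - b)).item()))
    return torch.allclose(a, b, atol=atol)

a = torch.randn(2, 4)
print(outputs_match(a, a + 1e-4))  # True
```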
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
A__ = {"""target_lang""": """fi""", """source_lang""": """en"""}
A__ = """>>zh<<"""
A__ = """Helsinki-NLP/"""
if is_torch_available():
A__ = """pt"""
elif is_tf_available():
A__ = """tf"""
else:
A__ = """jax"""
@require_sentencepiece
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = MarianTokenizer
_UpperCAmelCase = False
_UpperCAmelCase = True
def snake_case ( self : Dict ):
super().setUp()
lowerCamelCase :Tuple = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Dict = Path(self.tmpdirname )
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
lowerCamelCase :Optional[int] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Union[str, Any] , **__snake_case : Optional[Any] ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : int , __snake_case : List[Any] ):
return (
"This is a test",
"This is a test",
)
def snake_case ( self : Optional[Any] ):
lowerCamelCase :str = '''</s>'''
lowerCamelCase :Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__snake_case ) , 9 )
def snake_case ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :List[str] = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
lowerCamelCase :Optional[Any] = en_de_tokenizer(['''I am a small frog'''] , return_tensors=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
lowerCamelCase :List[str] = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(__snake_case , batch.input_ids[0] )
lowerCamelCase :str = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__snake_case )
lowerCamelCase :str = [x.name for x in Path(__snake_case ).glob('''*''' )]
self.assertIn('''source.spm''' , __snake_case )
MarianTokenizer.from_pretrained(__snake_case )
def snake_case ( self : str ):
lowerCamelCase :Tuple = self.get_tokenizer()
lowerCamelCase :Tuple = tok(
['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def snake_case ( self : List[Any] ):
lowerCamelCase :str = self.get_tokenizer()
lowerCamelCase :Any = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=__snake_case , return_tensors=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def snake_case ( self : List[Any] ):
# fmt: off
lowerCamelCase :str = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def snake_case ( self : Tuple ):
lowerCamelCase :Optional[Any] = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
lowerCamelCase :int = '''Tämä on testi'''
lowerCamelCase :Union[str, Any] = '''This is a test'''
lowerCamelCase :Any = [76, 7, 2047, 2]
lowerCamelCase :Any = [69, 12, 11, 940, 2]
lowerCamelCase :int = tokenizer(__snake_case ).input_ids
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :str = tokenizer(text_target=__snake_case ).input_ids
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Optional[int] = tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
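The shape assertions in these tokenizer tests reduce to batch padding and truncation: the longest sequence sets the padded length unless a cap cuts it short. A dependency-free sketch of that behaviour:

```python
def pad_batch(seqs, pad_id=0, max_len=None):
    # pad every sequence to the batch maximum, or truncate to max_len first
    limit = max_len or max(len(s) for s in seqs)
    return [s[:limit] + [pad_id] * (limit - len(s[:limit])) for s in seqs]

print(pad_batch([[5, 6, 7], [1]]))             # [[5, 6, 7], [1, 0, 0]]
print(pad_batch([[5, 6, 7], [1]], max_len=2))  # [[5, 6], [1, 0]]
```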
| 166 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Tuple , __snake_case : int , __snake_case : List[str]=13 , __snake_case : int=7 , __snake_case : int=True , __snake_case : int=True , __snake_case : Dict=True , __snake_case : str=True , __snake_case : Dict=99 , __snake_case : Optional[int]=32 , __snake_case : Optional[Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : int="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Any=512 , __snake_case : Dict=16 , __snake_case : Optional[int]=2 , __snake_case : str=0.0_2 , __snake_case : int=4 , ):
lowerCamelCase :Union[str, Any] = parent
lowerCamelCase :str = batch_size
lowerCamelCase :Dict = seq_length
lowerCamelCase :int = is_training
lowerCamelCase :int = use_attention_mask
lowerCamelCase :Optional[Any] = use_token_type_ids
lowerCamelCase :int = use_labels
lowerCamelCase :List[Any] = vocab_size
lowerCamelCase :str = hidden_size
lowerCamelCase :Optional[int] = num_hidden_layers
lowerCamelCase :Tuple = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :Tuple = hidden_act
lowerCamelCase :Any = hidden_dropout_prob
lowerCamelCase :List[str] = attention_probs_dropout_prob
lowerCamelCase :Any = max_position_embeddings
lowerCamelCase :Dict = type_vocab_size
lowerCamelCase :int = type_sequence_label_size
lowerCamelCase :str = initializer_range
lowerCamelCase :Any = num_choices
def snake_case ( self : Any ):
lowerCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase :Any = None
if self.use_attention_mask:
lowerCamelCase :int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase :Dict = None
if self.use_token_type_ids:
lowerCamelCase :Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase :List[str] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case ( self : Any ):
lowerCamelCase :int = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = config_and_inputs
lowerCamelCase :str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Optional[Any] = config_and_inputs
lowerCamelCase :List[Any] = True
lowerCamelCase :Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase :str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = True
_UpperCAmelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case ( self : Optional[int] ):
lowerCamelCase :str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def snake_case ( self : Any ):
for model_class_name in self.all_model_classes:
lowerCamelCase :Dict = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__snake_case )
lowerCamelCase :Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__snake_case )
lowerCamelCase :int = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
lowerCamelCase :List[str] = model(__snake_case )[0]
lowerCamelCase :Tuple = [1, 11, 50265]
self.assertEqual(list(output.shape ) , __snake_case )
# compare the actual values for a slice.
lowerCamelCase :Optional[Any] = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=__snake_case )
lowerCamelCase :Union[str, Any] = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
lowerCamelCase :int = model(__snake_case )[0]
# compare the actual values for a slice.
lowerCamelCase :List[Any] = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
| 166 | 1 |
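These integration tests pin a small slice of the logits against hard-coded reference values; a sketch of that comparison (the numbers below are illustrative, not real model outputs):

```python
import numpy as np

output = np.array([[[40.4880, 18.0199, -5.2367],
                    [-1.8877, -4.0885, 10.7085],
                    [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
expected = output + 5e-5  # stand-in for values pinned from a converted checkpoint
assert np.allclose(output, expected, atol=1e-4)
```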
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 710 |
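A generic sketch of the guarded optional-import pattern above, using `importlib` to probe for backends before committing to the real imports (package names here are examples):

```python
import importlib.util

def is_available(package):
    # probe for an installed package without importing it
    return importlib.util.find_spec(package) is not None

if is_available("torch") and is_available("transformers"):
    print("real pipeline classes would be imported here")
else:
    print("dummy placeholder objects would be imported here")
```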
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __magic_name__=None )-> List[str]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : List[str] = subparsers.add_parser("test" )
else:
snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=__magic_name__ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ : str = script_name
else:
snake_case_ : Any = F'''--config_file={args.config_file} {script_name}'''
snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split()
snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Dict = test_command_parser()
snake_case_ : Dict = parser.parse_args()
test_command(__magic_name__ )
if __name__ == "__main__":
main()
| 656 | 0 |
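A minimal sketch of the subparser wiring used by the test command above: `test` registers as a subcommand when a parent parser is passed, and stands alone otherwise.

```python
import argparse

def test_command_parser(subparsers=None):
    # attach to a parent CLI if given, otherwise build a standalone parser
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument("--config_file", default=None)
    return parser

parser = argparse.ArgumentParser(prog="accelerate")
sub = parser.add_subparsers(dest="command")
test_command_parser(sub)
args = parser.parse_args(["test", "--config_file", "cfg.yaml"])
print(args.config_file)
```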
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _a ( unittest.TestCase ):
def __init__( self: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple=7 , UpperCamelCase_: int=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Optional[Any]=30 , UpperCamelCase_: str=400 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[str]=None , ) -> Tuple:
"""simple docstring"""
lowercase__ = size if size is not None else {'''height''': 20, '''width''': 20}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = do_convert_rgb
lowercase__ = [512, 1_024, 2_048, 4_096]
lowercase__ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
lowercase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = PixaStructImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processor_tester.prepare_dummy_image()
lowercase__ = self.image_processing_class(**self.image_processor_dict )
lowercase__ = 2_048
lowercase__ = image_processor(UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
lowercase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase_ ):
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
lowercase__ = '''Hello'''
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCamelCase_ ( self: List[Any] ) -> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = PixaStructImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = PixaStructImageProcessingTester(self , num_channels=4 )
lowercase__ = 3
@property
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) )
def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowercase__ = image_processor(
UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 43 |
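The hidden dimension asserted in these tests is `patch_h * patch_w * channels + 2`: flattened pixel patches plus two positional columns. A sketch of that flattening for a single image (sizes assumed divisible by the patch size):

```python
import torch

def flatten_into_patches(image, patch_h=16, patch_w=16):
    # image: (C, H, W) with H, W divisible by the patch size
    c, h, w = image.shape
    rows, cols = h // patch_h, w // patch_w
    patches = image.unfold(1, patch_h, patch_h).unfold(2, patch_w, patch_w)
    patches = patches.permute(1, 2, 0, 3, 4).reshape(rows * cols, -1)
    # prepend 1-based row/column ids as the two extra feature columns
    row_ids = (torch.arange(rows).repeat_interleave(cols) + 1).unsqueeze(1).float()
    col_ids = (torch.arange(cols).repeat(rows) + 1).unsqueeze(1).float()
    return torch.cat([row_ids, col_ids, patches], dim=1)

print(flatten_into_patches(torch.randn(3, 32, 32)).shape)  # torch.Size([4, 770])
```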
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =torch.load(__UpperCamelCase, map_location="""cpu""" )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__ =torch.load(__UpperCamelCase, map_location="""cpu""" )["""model"""]
# pop unnecessary weights
SCREAMING_SNAKE_CASE__ =[
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ ={
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__ =sd.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__ =sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__ =key.replace(""".qkv_proj.""", """.q_proj.""" )
SCREAMING_SNAKE_CASE__ =key.replace(""".qkv_proj.""", """.k_proj.""" )
SCREAMING_SNAKE_CASE__ =key.replace(""".qkv_proj.""", """.v_proj.""" )
SCREAMING_SNAKE_CASE__ =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =torch.split(__UpperCamelCase, depth // 3, dim=0 )
SCREAMING_SNAKE_CASE__ =q
SCREAMING_SNAKE_CASE__ =k
SCREAMING_SNAKE_CASE__ =v
del sd[key]
return sd
@torch.no_grad()
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase=None ):
SCREAMING_SNAKE_CASE__ =load_checkpoint(__UpperCamelCase )
if config is not None:
SCREAMING_SNAKE_CASE__ =OPTConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ =OPTConfig()
SCREAMING_SNAKE_CASE__ =OPTModel(__UpperCamelCase ).half().eval()
model.load_state_dict(__UpperCamelCase )
# Check results
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
lowerCamelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 151 | 0 |
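The QKV handling above splits a fused projection along dim 0; a minimal sketch (the Q/K/V ordering is checkpoint-specific, as the script's comment warns):

```python
import torch

hidden = 8
# fused projection: three (hidden, hidden) blocks stacked along dim 0
qkv_weight = torch.randn(3 * hidden, hidden)
q, k, v = torch.split(qkv_weight, hidden, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)
```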
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__UpperCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def A_ ( lowercase_ , lowercase_ ) ->Optional[Any]:
"""simple docstring"""
return torch.atana(lowercase_ , lowercase_ ) / math.pi * 2
def A_ ( lowercase_ ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowercase_ , lowercase_ )
class a_( lowercase__ ):
"""simple docstring"""
pass
class a_( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = DiffusionAttnUnetaD(lowerCAmelCase__ , n_attn_layers=4)
SCREAMING_SNAKE_CASE = deepcopy(self.diffusion)
SCREAMING_SNAKE_CASE = torch.quasirandom.SobolEngine(1 , scramble=lowerCAmelCase__)
def A_ ( lowercase_ ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]['url']
os.system(f'''wget {url} ./''' )
return f'''./{model_name}.ckpt'''
__UpperCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__UpperCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__UpperCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__UpperCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__UpperCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__UpperCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def A_ ( lowercase_ ) ->Optional[int]:
"""simple docstring"""
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(f'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def A_ ( lowercase_ ) ->List[Any]:
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
return name.replace(lowercase_ , lowercase_ )
elif name.startswith(lowercase_ ):
return [name.replace(lowercase_ , lowercase_ ) for v in value]
raise ValueError(f'''Attn error with {name}''' )
def A_ ( lowercase_ , lowercase_=1_3 ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
SCREAMING_SNAKE_CASE = 0
if string.startswith('net.3.' ):
depth += 1
SCREAMING_SNAKE_CASE = string[6:]
elif string.startswith('net.' ):
SCREAMING_SNAKE_CASE = string[4:]
while string.startswith('main.7.' ):
depth += 1
SCREAMING_SNAKE_CASE = string[7:]
if string.startswith('main.' ):
SCREAMING_SNAKE_CASE = string[5:]
# mid block
if string[:2].isdigit():
SCREAMING_SNAKE_CASE = string[:2]
SCREAMING_SNAKE_CASE = string[2:]
else:
SCREAMING_SNAKE_CASE = string[0]
SCREAMING_SNAKE_CASE = string[1:]
if depth == max_depth:
SCREAMING_SNAKE_CASE = MID_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = 'mid_block'
elif depth > 0 and int(lowercase_ ) < 7:
SCREAMING_SNAKE_CASE = DOWN_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = f'''down_blocks.{depth}'''
elif depth > 0 and int(lowercase_ ) > 7:
SCREAMING_SNAKE_CASE = UP_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = f'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
SCREAMING_SNAKE_CASE = DEPTH_0_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = f'''up_blocks.{max_depth - 1}''' if int(lowercase_ ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' )
SCREAMING_SNAKE_CASE = string_left[1:]
if "resnets" in new_layer:
SCREAMING_SNAKE_CASE = convert_resconv_naming(lowercase_ )
elif "attentions" in new_layer:
SCREAMING_SNAKE_CASE = convert_attn_naming(lowercase_ )
SCREAMING_SNAKE_CASE = new_string_left
if not isinstance(lowercase_ , lowercase_ ):
SCREAMING_SNAKE_CASE = prefix + '.' + new_layer + '.' + string_left
else:
SCREAMING_SNAKE_CASE = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def A_ ( lowercase_ ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
# up- and downsample layers don't have trainable weights
continue
SCREAMING_SNAKE_CASE = rename(lowercase_ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowercase_ , lowercase_ ):
SCREAMING_SNAKE_CASE = transform_conv_attns(lowercase_ , lowercase_ , lowercase_ )
else:
SCREAMING_SNAKE_CASE = v
return new_state_dict
def A_ ( lowercase_ , lowercase_ , lowercase_ ) ->List[str]:
"""simple docstring"""
if len(lowercase_ ) == 1:
if len(v.shape ) == 3:
# weight
SCREAMING_SNAKE_CASE = v[:, :, 0]
else:
# bias
SCREAMING_SNAKE_CASE = v
else:
# qkv matrices
SCREAMING_SNAKE_CASE = v.shape[0]
SCREAMING_SNAKE_CASE = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(seed)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
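# Hedged usage sketch (the script filename and model name are assumptions; any
# key of the MODELS_MAP defined earlier in this script should work):
#   python convert_dance_diffusion_to_diffusers.py --model_path <official-model-name> --checkpoint_path ./converted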
| 259 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
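# A small hedged sketch of how the mapping composes (the key and value are
# illustrative, not taken from a real checkpoint): a Diffusers key is rewritten
# prefix-by-prefix into its stable-diffusion counterpart.
#
#   state = {"down_blocks.0.resnets.0.norm1.weight": 0}
#   convert_unet_state_dict(state)
#   # -> {"input_blocks.1.0.in_layers.0.weight": 0}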
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
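# Hedged illustration of the reshape (the shapes are assumptions for clarity,
# not read from a specific model): a Linear weight of shape (512, 512) becomes
# a 1x1 Conv2d kernel of shape (512, 512, 1, 1).
#
#   import torch
#   w = torch.zeros(512, 512)
#   assert reshape_weight_for_sd(w).shape == (512, 512, 1, 1)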
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
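# Hedged sketch of the qkv re-fusing this function performs (the dimension is
# an assumption for illustration, not read from a file): three separate
# (1024, 1024) q/k/v projection weights are concatenated along dim 0 into a
# single (3072, 1024) in_proj weight.
#
#   import torch
#   q = k = v = torch.zeros(1024, 1024)
#   assert torch.cat([q, k, v]).shape == (3072, 1024)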
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors; default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
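# Hedged usage sketch (the script filename and paths are placeholders; the
# directory layout is the standard Diffusers one this script already assumes):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors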
| 259 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """
    Ideal gas law: P = nRT / V

    >>> pressure_of_gas_system(2, 100, 5)
    332.57848
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """
    Ideal gas law: V = nRT / P

    >>> volume_of_gas_system(0.5, 273, 0.004)
    283731.01575
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
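# Round-trip sanity example of PV = nRT (the numbers are arbitrary assumptions):
# computing P from (n, T, V) and then V from (n, T, P) recovers the original V.
#
#   p = pressure_of_gas_system(2.0, 300.0, 10.0)   # P = nRT / V
#   assert abs(volume_of_gas_system(2.0, 300.0, p) - 10.0) < 1e-9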
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 475 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'

_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 272 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 272 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 616 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights.
    # Note: the boolean flag values below are taken from the upstream conversion
    # script; they are not recoverable from this file alone.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 616 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 704 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the similarity between two sets as the ratio of their
    intersection to their union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
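# Quick worked example (the numbers follow directly from the definition): for
# the sets used in __main__ below, the intersection is {"c", "d", "e"} (3
# elements) and the union has 8 elements, so the similarity is 3 / 8 = 0.375.
#
#   assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375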
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 131 | 0 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
__lowerCAmelCase = (DDPMParallelScheduler,)
def __UpperCAmelCase ( self : str, **UpperCamelCase__ : List[str] ) -> Tuple:
_A = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**UpperCamelCase__ )
return config
def __UpperCAmelCase ( self : List[Any] ) -> Any:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase__, beta_end=UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
self.check_over_configs(thresholding=UpperCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase__, prediction_type=UpperCamelCase__, sample_max_value=UpperCamelCase__, )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**UpperCamelCase__ )
_A = len(UpperCamelCase__ )
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = self.dummy_sample_deter + 0.1
_A = self.dummy_sample_deter - 0.1
_A = samplea.shape[0]
_A = torch.stack([samplea, samplea, samplea], dim=0 )
_A = torch.arange(UpperCamelCase__ )[0:3, None].repeat(1, UpperCamelCase__ )
_A = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
_A = scheduler.batch_step_no_noise(UpperCamelCase__, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ) )
_A = torch.sum(torch.abs(UpperCamelCase__ ) )
_A = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5_005 ) < 1e-3
def test_full_loop_no_noise(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    num_trained_timesteps = len(scheduler)

    model = self.dummy_model()
    sample = self.dummy_sample_deter
    generator = torch.manual_seed(0)

    for t in reversed(range(num_trained_timesteps)):
        # 1. predict noise residual
        residual = model(sample, t)

        # 2. predict previous mean of sample x_t-1
        pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

        sample = pred_prev_sample

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    assert abs(result_sum.item() - 258.9606) < 1e-2
    assert abs(result_mean.item() - 0.3372) < 1e-3
def test_full_loop_with_v_prediction(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
    scheduler = scheduler_class(**scheduler_config)

    num_trained_timesteps = len(scheduler)

    model = self.dummy_model()
    sample = self.dummy_sample_deter
    generator = torch.manual_seed(0)

    for t in reversed(range(num_trained_timesteps)):
        # 1. predict noise residual
        residual = model(sample, t)

        # 2. predict previous mean of sample x_t-1
        pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

        sample = pred_prev_sample

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    assert abs(result_sum.item() - 202.0296) < 1e-2
    assert abs(result_mean.item() - 0.2631) < 1e-3
def test_custom_timesteps(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [100, 87, 50, 1, 0]

    scheduler.set_timesteps(timesteps=timesteps)

    scheduler_timesteps = scheduler.timesteps

    for i, timestep in enumerate(scheduler_timesteps):
        if i == len(scheduler_timesteps) - 1:
            expected_prev_t = -1
        else:
            expected_prev_t = timesteps[i + 1]

        prev_t = scheduler.previous_timestep(timestep)
        prev_t = prev_t.item()

        self.assertEqual(prev_t, expected_prev_t)
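# In plain terms (illustrative, not part of the original test): with custom
# timesteps, previous_timestep(t_i) should return the next entry t_{i+1} of
# the user-provided descending list, and -1 once the final timestep is reached.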
def test_custom_timesteps_increasing_order(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [100, 87, 50, 51, 0]

    with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
        scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [100, 87, 50, 1, 0]
    num_inference_steps = len(timesteps)

    with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [scheduler.config.num_train_timesteps]

    with self.assertRaises(
        ValueError,
        msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
    ):
        scheduler.set_timesteps(timesteps=timesteps)
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
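# Illustrative usage (hypothetical values, not part of the original file):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# assert config.use_timm_backbone and config.num_channels == 3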
| 461 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (0..1) until the Mandelbrot sequence for the
    complex number x + y*i diverges, or 1.0 if it stays bounded.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
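# Worked example (illustrative): for (x, y) = (0, 0) the sequence never leaves
# the origin, so get_distance returns 1.0; for (2, 0) it escapes the bailout
# radius on the very first step and the result is 0.0.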
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coded coloring: black inside the set, an HSV hue outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 712 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
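# Illustrative usage (hypothetical values, not part of the original file):
# config = ConvNextV2Config(depths=[2, 2, 6, 2])
# print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']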
| 87 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """
    Return the set of symbol pairs in a word. A word is represented as a tuple
    of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
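# Worked example (illustrative): get_pairs(("h", "e", "l", "l", "o</w>"))
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.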
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
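    # Worked example (illustrative): if BPE produced ["hel@@", "lo", "world"],
    # joining gives "hel@@ lo world" and stripping the "@@ " continuation
    # markers yields "hello world".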
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 637 |
snake_case__ : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
snake_case__ : Tuple = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
snake_case__ : List[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case__ : List[str] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
snake_case__ : Any = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
snake_case__ : int = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
snake_case__ : Any = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
snake_case__ : Union[str, Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 278 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
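# Illustrative usage (hypothetical values, not part of the original file):
# config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2, d_model=64)
# assert config.hidden_size == 64  # resolved through the attribute_map alias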
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
def UpperCAmelCase ( self : Dict , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , a_ , a_ , a_ , a_ )
# Generate decoder inputs
a__ : Union[str, Any] = seq_length if not self.use_past else 1
a__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , a_ , a_ , a_ , a_ )
a__ : Optional[int] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
a__ : Union[str, Any] = dict(**a_ , **a_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a__ , a__ : List[Any] = common_inputs["input_ids"].shape
a__ : str = common_inputs["decoder_input_ids"].shape[1]
a__ , a__ : str = self.num_attention_heads
a__ : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a__ : Union[str, Any] = decoder_seq_length + 3
a__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a__ : Any = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(a_ , a_ )] , dim=1 )
a__ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a__ , a__ : Union[str, Any] = self.num_layers
a__ : Tuple = min(a_ , a_ )
a__ : Dict = max(a_ , a_ ) - min_num_layers
a__ : Any = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(a_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(a_ ),
torch.zeros(a_ ),
torch.zeros(a_ ),
torch.zeros(a_ ),
) )
# TODO: test this.
a__ : int = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(a_ , a_ ):
common_inputs["past_key_values"].append((torch.zeros(a_ ), torch.zeros(a_ )) )
return common_inputs
def UpperCAmelCase ( self : List[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , a_ , a_ , a_ , a_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
a__ , a__ : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
a__ : Union[str, Any] = seqlen + 2
a__ , a__ : List[str] = self.num_layers
a__ , a__ : int = self.num_attention_heads
a__ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a__ : Optional[Any] = common_inputs["attention_mask"].dtype
a__ : int = torch.cat(
[common_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
a__ : int = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(a_ )
]
return common_inputs
def UpperCAmelCase ( self : Union[str, Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : int = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a__ : Optional[int] = tokenizer.num_special_tokens_to_add(a_ )
a__ : Any = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
a__ : Union[str, Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
a__ : Optional[Any] = dict(tokenizer(a_ , return_tensors=a_ ) )
return common_inputs
def UpperCAmelCase ( self : Dict , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
a__ : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
elif self.task == "causal-lm":
a__ : Any = self._generate_dummy_inputs_for_causal_lm(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
else:
a__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
return common_inputs
def UpperCAmelCase ( self : Union[str, Any] , a_ : Optional[Any] , a_ : Any , a_ : List[str] , a_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
a__ : List[str] = super()._flatten_past_key_values_(a_ , a_ , a_ , a_ )
else:
a__ : Any = super(a_ , self )._flatten_past_key_values_(
a_ , a_ , a_ , a_ ) | 251 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
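# Worked example (illustrative): LAYERS_TO_COPY[12][3] == [0, 6, 11], i.e. a
# 3-layer student of a 12-layer teacher copies the first, middle and last
# teacher layers; LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11] pairs each student
# layer with a spread-out teacher layer for intermediate supervision.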
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
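# Worked example (illustrative): get_layers_to_supervise(n_student=1,
# n_teacher=12) returns [11] -- a single-layer student is always supervised by
# the teacher's last layer.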
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 251 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self , a__):
A__ = '''this is a test'''
A__ = '''this is a test'''
return input_text, output_text
def snake_case_ ( self):
A__ = '''<pad>'''
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__) , a__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__) , a__)
def snake_case_ ( self):
A__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<pad>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''[PAD]''')
self.assertEqual(len(a__) , 3_0_0_0_1)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0)
def snake_case_ ( self):
# fmt: off
A__ = ''' \tHeLLo!how \n Are yoU? '''
A__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__)
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__)
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''')
def snake_case_ ( self):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''')
def snake_case_ ( self):
pass
def snake_case_ ( self):
# fmt: off
A__ = '''I was born in 92000, and this is falsé.'''
A__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , split_by_punct=a__)
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = DebertaVaTokenizerFast(a__ , split_by_punct=a__)
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
# fmt: off
A__ = '''I was born in 92000, and this is falsé.'''
A__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
# fmt: off
A__ = '''I was born in 92000, and this is falsé.'''
A__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
# fmt: off
A__ = '''I was born in 92000, and this is falsé.'''
A__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
# fmt: off
A__ = ''' \tHeLLo!how \n Are yoU? '''
A__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__)
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = '''I was born in 92000, and this is falsé.'''
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__))
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__))
self.assertListEqual(a__ , a__)
A__ = tokenizer.encode(a__ , add_special_tokens=a__)
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(a__)
A__ = rust_tokenizer.encode(a__)
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
A__ = '''This is a test'''
A__ = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
A__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
A__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
A__ = DebertaVaTokenizer(a__ , keep_accents=a__)
A__ = DebertaVaTokenizerFast(a__ , keep_accents=a__)
A__ = tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
A__ = tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
A__ = tokenizer.convert_ids_to_tokens(a__)
self.assertListEqual(a__ , a__)
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
A__ = rust_tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
A__ = rust_tokenizer.convert_ids_to_tokens(a__)
self.assertListEqual(a__ , a__)
# fmt: off
A__ = '''I was born in 92000, and this is falsé.'''
A__ = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
A__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
A__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
A__ = tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
A__ = tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
A__ = tokenizer.convert_ids_to_tokens(a__)
self.assertListEqual(a__ , a__)
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
A__ = rust_tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
A__ = rust_tokenizer.convert_ids_to_tokens(a__)
self.assertListEqual(a__ , a__)
def snake_case_ ( self):
A__ = DebertaVaTokenizer(a__)
A__ = tokenizer.encode('''sequence builders''')
A__ = tokenizer.encode('''multi-sequence build''')
A__ = tokenizer.build_inputs_with_special_tokens(a__)
A__ = tokenizer.build_inputs_with_special_tokens(a__ , a__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a__ , )
@slow
def snake_case_ ( self):
# fmt: off
A__ = {'''input_ids''': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 632 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
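# Illustrative mapping (not part of the original file): the model name
# "mobilenet_v1_1.0_224" yields depth_multiplier=1.0, image_size=224 and
# 1001 labels (index 0 being the extra "background" class).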
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 632 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__lowerCamelCase : List[Any] = "src/diffusers"
# Matches is_xxx_available()
__lowerCamelCase : List[Any] = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
__lowerCamelCase : List[Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
__lowerCamelCase : List[str] = "\n{0} = None\n"
__lowerCamelCase : Dict = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
__lowerCamelCase : Union[str, Any] = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
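# Worked example (illustrative): create_dummy_object("FlaxUNet2DConditionModel",
# '["flax"]') is mixed-case, so it renders the DUMMY_CLASS template -- a
# placeholder class whose methods all call requires_backends(..., ["flax"]).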
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 457 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
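# Illustrative effect (not part of the original file): instantiating
# MidiProcessor() without `note_seq` installed fails inside requires_backends
# with an informative error, instead of breaking the package import itself.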
| 457 | 1 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Add the one-bit extensions (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string with an LZ78-style dictionary coder."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the original file's length (self-delimiting binary form) to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
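# Illustrative behaviour (summary, not part of the original file): compress()
# turns the source file into a bit string, repeatedly emits the lexicon code
# of the current match while growing the dictionary with one-bit extensions
# (an LZ78-style coder), prefixes the original length in an Elias-gamma-like
# form via add_file_length, and pads the output to whole bytes before writing.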
| 385 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
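# Illustrative effect (not part of the original file): swapping the module in
# sys.modules for a _LazyModule defers the sentencepiece-dependent import of
# GPTSw3Tokenizer until the attribute is first accessed.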
| 385 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
# NOTE: this row collapsed the three rotor-position variables into one name;
# the distinct names below are reconstructed from the error messages.
def _validator( rotpos : RotorPositionT , rotsel : RotorSelectionT , pb : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """simple docstring"""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg )
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg )
    if not 0 < rotorpos2 <= len(abc ):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg )
    if not 0 < rotorpos3 <= len(abc ):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg )
    # Validates string and returns dict
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def _plugboard( pbstring : str ) -> dict[str, str]:
    """simple docstring"""
    if not isinstance(pbstring , str ):
        msg = f"Plugboard setting isn't type string ({type(pbstring )})"
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring )})"
        raise Exception(msg )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(""" """ , """""" )  # was a no-op in the source; the result must be assigned
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg )
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma( text : str , rotor_position : RotorPositionT , rotor_selection : RotorSelectionT = (rotor1, rotor2, rotor3) , plugb : str = "" , ) -> str:
    """simple docstring"""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor_a[index % len(abc )]
            # rotor rb --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor_b[index % len(abc )]
            # rotor rc --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor_c[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol ) - rotorpos3]
            symbol = abc[rotor_b.index(symbol ) - rotorpos2]
            symbol = abc[rotor_a.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)  # any three distinct rotors; the original digits were obfuscated away
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 499 |
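# A quick property check for the Enigma row above: because the reflector is a
# symmetric mapping, the cipher is an involution, so applying it twice with the
# same settings restores the uppercased input. This sketch reuses the names
# restored above (enigma, rotor1, rotor5, rotor7); it is an illustration, not
# part of the original row.
msg = "HELLO WORLD"
settings = ((1, 17, 5), (rotor1, rotor5, rotor7), "AB")
assert enigma(enigma(msg, *settings), *settings) == msg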
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : int =logging.get_logger(__name__)
A__ : Union[str, Any] ={
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class __A ( PretrainedConfig ):
lowerCamelCase ='''dpr'''
def __init__( self : Optional[int] , lowerCamelCase : Tuple=3_05_22 , lowerCamelCase : List[str]=7_68 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Tuple=12 , lowerCamelCase : Dict=30_72 , lowerCamelCase : List[Any]="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Optional[Any]=5_12 , lowerCamelCase : List[Any]=2 , lowerCamelCase : int=0.02 , lowerCamelCase : Union[str, Any]=1e-1_2 , lowerCamelCase : int=0 , lowerCamelCase : List[str]="absolute" , lowerCamelCase : int = 0 , **lowerCamelCase : List[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
__A : Dict = vocab_size
__A : int = hidden_size
__A : str = num_hidden_layers
__A : Union[str, Any] = num_attention_heads
__A : Union[str, Any] = hidden_act
__A : Optional[Any] = intermediate_size
__A : str = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = max_position_embeddings
__A : Dict = type_vocab_size
__A : Dict = initializer_range
__A : Union[str, Any] = layer_norm_eps
__A : int = projection_dim
__A : List[Any] = position_embedding_type
| 499 | 1 |
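# A short usage sketch for the DPR configuration above, going through the
# public transformers API; the projection size chosen here is illustrative.
from transformers import DPRConfig, DPRQuestionEncoder

config = DPRConfig(projection_dim=128)
encoder = DPRQuestionEncoder(config)
print(config.hidden_size, config.projection_dim)  # 768 128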
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset ( dataset_size , input_in_memory_max_size , monkeypatch ):
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 491 |
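# The test above pins down the semantics of is_small_dataset. Here is a sketch
# of a function satisfying those assertions; the real datasets implementation
# may differ in detail.
import datasets.config

def is_small_dataset_sketch(dataset_size) -> bool:
    # "small" means: size is known, a ceiling is configured, and size < ceiling.
    max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if dataset_size and max_size:
        return dataset_size < max_size
    return False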
import unittest
from knapsack import knapsack as k
class UpperCamelCase ( unittest.TestCase ):
def __A ( self ):
A__ = 0
A__ = [0]
A__ = [0]
A__ = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 0 )
A__ = [60]
A__ = [10]
A__ = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 0 )
def __A ( self ):
A__ = 3
A__ = [1, 2, 3]
A__ = [3, 2, 1]
A__ = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 5 )
def __A ( self ):
A__ = 50
A__ = [60, 100, 120]
A__ = [10, 20, 30]
A__ = len(UpperCAmelCase__ )
self.assertEqual(k.knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
| 491 | 1 |
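# The tests above exercise knapsack.knapsack(capacity, weights, values, counter).
# Below is a plain recursive 0/1 knapsack that satisfies all three test cases;
# the argument order is inferred from the tests, not taken from the original module.
def knapsack(capacity: int, weights: list[int], values: list[int], counter: int) -> int:
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        # Item doesn't fit: skip it.
        return knapsack(capacity, weights, values, counter - 1)
    # Best of taking or skipping the last considered item.
    return max(
        values[counter - 1]
        + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )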
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama'''] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_llama'''] = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
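# Several rows in this dump end with the same _LazyModule pattern. Below is a
# self-contained illustration of the idea (defer real imports until an exported
# symbol is first touched); it is NOT the transformers implementation, and the
# demo module/symbol choices are arbitrary.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # map exported symbol -> fully qualified module that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        value = getattr(importlib.import_module(module_name), symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value

lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(2.0))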
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( BackboneConfigMixin, PretrainedConfig ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase__ ( OnnxConfig ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 1 |
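# Usage sketch for the Swin configuration above via the public transformers
# API; the hyperparameters shown are the row's own defaults.
from transformers import SwinConfig, SwinModel

config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
model = SwinModel(config)
# hidden_size is derived as embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768
print(config.hidden_size)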
from __future__ import annotations
import numpy as np
def A ( table : np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition (no pivoting) of a square matrix; the
    # variable names are reconstructed from the references in the body.
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 45 |
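# A small numeric check for the LU routine above (A is the row's obfuscated
# function name): the factors must multiply back to the input. The example
# matrix is chosen here for illustration and has no zero pivots.
import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = A(matrix)
np.testing.assert_allclose(lower @ upper, matrix)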
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 45 | 1 |
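# Usage sketch for GLPNImageProcessor as exercised by the tests above: output
# height and width come out as multiples of size_divisor. The input size here
# is an illustrative assumption.
import numpy as np
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor(size_divisor=32)
image = np.random.randint(0, 256, (479, 640, 3), dtype=np.uint8)
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape[-1] % 32 == 0 and pixel_values.shape[-2] % 32 == 0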
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A__ : List[str] = None
A__ : str = logging.get_logger(__name__)
A__ : Any = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
A__ : Union[str, Any] = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
A__ : int = {
'facebook/mbart-large-en-ro': 1_0_2_4,
'facebook/mbart-large-cc25': 1_0_2_4,
}
# fmt: off
A__ : Dict = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __magic_name__ ( PreTrainedTokenizerFast ):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ = MBartTokenizer
UpperCamelCase_ = []
UpperCamelCase_ = []
def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=None , A_=None , A_=None , **A_ , ) -> str:
"""simple docstring"""
_lowercase: Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
vocab_file=A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , **A_ , )
_lowercase: Dict = vocab_file
_lowercase: Optional[Any] = False if not self.vocab_file else True
_lowercase: Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
_lowercase: Tuple = {
lang_code: self.convert_tokens_to_ids(A_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowercase: List[str] = src_lang if src_lang is not None else '''en_XX'''
_lowercase: Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
_lowercase: Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowercase_ ( self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowercase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowercase: Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowercase: Union[str, Any] = [self.sep_token_id]
_lowercase: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , A_ , A_ , A_ , A_ , **A_ ) -> str:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowercase: Tuple = src_lang
_lowercase: Tuple = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
_lowercase: Dict = self.convert_tokens_to_ids(A_ )
_lowercase: Union[str, Any] = tgt_lang_id
return inputs
def lowercase_ ( self , A_ , A_ = "en_XX" , A_ = None , A_ = "ro_RO" , **A_ , ) -> BatchEncoding:
"""simple docstring"""
_lowercase: int = src_lang
_lowercase: Tuple = tgt_lang
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowercase: Any = self.convert_tokens_to_ids(A_ )
_lowercase: Optional[Any] = []
_lowercase: str = [self.eos_token_id, self.cur_lang_code]
_lowercase: Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowercase: Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowercase: List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowercase: List[Any] = self.convert_tokens_to_ids(A_ )
_lowercase: Optional[Any] = []
_lowercase: List[Any] = [self.eos_token_id, self.cur_lang_code]
_lowercase: Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowercase: Dict = self.convert_ids_to_tokens(self.suffix_tokens )
_lowercase: int = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowercase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
_lowercase: str = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 272 |
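# Usage sketch for the fast MBart tokenizer above, using the public
# transformers API: source sequences are suffixed with </s> followed by the
# source language code, matching set_src_lang_special_tokens in the row.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(batch.input_ids[0].tolist())[-2:])  # ['</s>', 'en_XX']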
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger()
# NOTE: identifiers in this row were obfuscated; names below are reconstructed
# from the surviving references and the argument order of the calls further down.
def convert_weight_and_push ( hidden_sizes , name , config , save_directory , push_to_hub = True ):
    """simple docstring"""
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        from_logits = from_model(x )
        our_logits = our_model(x ).logits
        assert torch.allclose(from_logits , our_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push ( save_directory , model_name = None , push_to_hub = True ):
    """simple docstring"""
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
A__ : Optional[Any] = parser.parse_args()
A__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 272 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ = IFPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def snake_case_ ( self):
return self._get_dummy_components()
def snake_case_ ( self , a__ , a__=0):
if str(a__).startswith('''mps'''):
A__ = torch.manual_seed(a__)
else:
A__ = torch.Generator(device=a__).manual_seed(a__)
A__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def snake_case_ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def snake_case_ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def snake_case_ ( self):
self._test_save_load_local()
def snake_case_ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case_ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def snake_case_ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self):
# if
A__ = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa)
A__ = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=a__ , tokenizer=a__)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
A__ , A__ = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
A__ = None
A__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(a__ , a__ , a__ , a__)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
A__ = IFImgaImgPipeline(**pipe_a.components)
A__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(a__ , a__ , a__ , a__)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
A__ = IFInpaintingPipeline(**pipe_a.components)
A__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(a__ , a__ , a__ , a__)
def snake_case_ ( self , a__ , a__ , a__ , a__):
# pipeline 1
_start_torch_memory_measurement()
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , num_inference_steps=2 , generator=a__ , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (6_4, 6_4, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(a__ , a__)
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(a__)
A__ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(a__ , a__)
def snake_case_ ( self , a__ , a__ , a__ , a__):
# pipeline 1
_start_torch_memory_measurement()
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(a__)
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , num_inference_steps=2 , generator=a__ , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (6_4, 6_4, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(a__ , a__)
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(a__)
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(a__)
A__ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(a__ , a__)
def snake_case_ ( self , a__ , a__ , a__ , a__):
# pipeline 1
_start_torch_memory_measurement()
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(a__)
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1)).to(a__)
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , num_inference_steps=2 , generator=a__ , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (6_4, 6_4, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(a__ , a__)
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device='''cpu''').manual_seed(0)
A__ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(a__)
A__ = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(a__)
A__ = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1)).to(a__)
A__ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
A__ = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
A__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(a__ , a__)
def _start_torch_memory_measurement ( )-> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 632 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 632 | 1 |
"""simple docstring"""
def solution( n : int = 4_0_0_0_0_0_0 ) -> int:
    # Sum the even-valued Fibonacci terms that do not exceed n.
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'{solution() = }')
| 720 |
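# Sanity checks for the Project Euler style helper above, against hand-computed
# sums: even Fibonacci terms up to 10 are 2 and 8; up to 100 they are 2, 8, 34.
assert solution(10) == 10
assert solution(100) == 44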
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
import torch
__a : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = pipeline("""text-classification""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Optional[int] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
__a : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Tuple = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : str = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a : Union[str, Any] = """HuggingFace is in"""
__a : List[str] = text_classifier(_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
__a : Dict = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a : Dict = text_classifier(_lowercase , top_k=_lowercase )
__a : Dict = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
__a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__a : Any = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(_lowercase ):
text_classifier(_lowercase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 0 |
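# Usage sketch mirroring the pipeline behaviour asserted above: top_k=None is
# the current way to get scores for every label (return_all_scores is legacy).
from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))              # best label only
print(classifier("This is great !", top_k=None))  # every label with its score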
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest ( nn.Module ):
    """simple docstring"""
    def __init__( self) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
        return self.linear2(self.batchnorm(self.linear1(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    def UpperCAmelCase__ ( self) -> str:
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict())
            index_file = os.path.join(tmp_dir , '''index.json''')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F'{key}.dat')
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
    def UpperCAmelCase__ ( self) -> Tuple:
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , '''weight''' , tmp_dir , {})
                weight_file = os.path.join(tmp_dir , '''weight.dat''')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(dtype).split('''.''')[1]}})
                new_weight = load_offloaded_weight(weight_file , index['''weight'''])
                self.assertTrue(torch.equal(weight , new_weight))
    def UpperCAmelCase__ ( self) -> str:
        # NOTE: cpu_part/disk_part are reconstructed names; the originals were obfuscated.
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
        disk_part = {k: v for k, v in state_dict.items() if '''linear2''' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map) , sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if '''weight''' in k}
        disk_part = {k: v for k, v in state_dict.items() if '''weight''' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map) , sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map) , sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key]))
    def UpperCAmelCase__ ( self) -> Any:
        state_dict = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
        extracted = extract_submodules_state_dict(state_dict , ['''a.1''', '''a.2'''])
        self.assertDictEqual(extracted , {'''a.1''': 0, '''a.2''': 2})
        state_dict = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
        extracted = extract_submodules_state_dict(state_dict , ['''a.1''', '''a.2'''])
        self.assertDictEqual(extracted , {'''a.1.a''': 0, '''a.2.a''': 2})
| 34 |
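# Round-trip sketch for the accelerate offloading utilities tested above:
# tensors written with offload_state_dict come back bit-identical through
# OffloadedWeightsLoader. Key names here are illustrative.
import torch
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"linear.weight": torch.randn(4, 3), "linear.bias": torch.randn(4)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)
    loader = OffloadedWeightsLoader(save_folder=tmp_dir)
    for key, tensor in state_dict.items():
        assert torch.equal(tensor, loader[key])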
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=4_00 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 2_55 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = do_normalize
snake_case__ = image_mean
snake_case__ = image_std
snake_case__ = do_rescale
snake_case__ = rescale_factor
snake_case__ = do_pad
def A_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A_ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
snake_case__ = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
snake_case__ , snake_case__ = image.size
else:
snake_case__ , snake_case__ = image.shape[1], image.shape[2]
if w < h:
snake_case__ = int(self.size["shortest_edge"] * h / w )
snake_case__ = self.size["shortest_edge"]
elif w > h:
snake_case__ = self.size["shortest_edge"]
snake_case__ = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ = self.size["shortest_edge"]
snake_case__ = self.size["shortest_edge"]
else:
snake_case__ = []
for image in image_inputs:
snake_case__ , snake_case__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( ImageProcessingSavingTestMixin , unittest.TestCase ):
_A : List[str] = DetaImageProcessor if is_vision_available() else None
def A_ ( self ):
snake_case__ = DetaImageProcessingTester(self )
@property
def A_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def A_ ( self ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def A_ ( self ):
pass
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 3_97_69, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_iscrowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 276 | 0 |
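# A minimal standalone sketch of the shortest-edge resize mirrored by
# get_expected_values above. The function name and the default of 18 are
# illustrative assumptions (the tester reads the value from self.size),
# not part of the test suite.
def shortest_edge_resize(w: int, h: int, shortest_edge: int = 18) -> tuple:
    """Return (height, width) after pinning the shorter side to `shortest_edge`."""
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(20, 30) == (27, 18)  # 18 * 30 / 20 = 27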
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions''' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 401 |
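# A minimal standalone sketch of the jit-enabled/jit-disabled comparison that
# test_jit_compilation performs above, using a toy function instead of a Flax
# model so it runs without checkpoints. `toy_forward` is an illustrative stand-in.
import jax
import jax.numpy as jnp

@jax.jit
def toy_forward(x):
    return jnp.tanh(x) * 2.0  # stand-in for a model forward pass

x = jnp.ones((2, 4))
jitted = toy_forward(x)  # traced and compiled on first call
with jax.disable_jit():
    eager = toy_forward(x)  # runs op by op
assert jitted.shape == eager.shape  # the same shape check the test makes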
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 401 | 1 |
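# For reproducible generations, a torch.Generator can be passed to the pipeline
# call above; `generator` is a supported diffusers argument, and the seed here
# is an arbitrary illustrative choice.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]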
'''simple docstring'''
def sum_of_digits ( num : int ) -> int:
    """simple docstring"""
    n = abs(num )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion ( num : int ) -> int:
    """simple docstring"""
    n = abs(num )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact ( num : int ) -> int:
    """simple docstring"""
    return sum(int(c ) for c in str(abs(num ) ) )
def benchmark ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup="""import __main__""" )
        print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )
for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 433 |
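# A quick sanity check of the three implementations above; for example,
# 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19.
for n in (0, 7, -19, 262_144):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
assert sum_of_digits(262_144) == 19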
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin ):
    '''simple docstring'''
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
| 433 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor ( ProcessorMixin ):
    feature_extractor_class = """Wav2Vec2FeatureExtractor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , FutureWarning , )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def pad( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 537 |
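# A hedged usage sketch of the processor above. The checkpoint name is an
# assumption (any Wav2Vec2 checkpoint works), and the zero array is synthetic
# stand-in audio at the model's expected 16 kHz sampling rate.
import numpy as np

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids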
'''simple docstring'''
import requests
snake_case_ = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def _lowerCamelCase( UpperCamelCase__ : str ) -> None:
# fetching a list of articles in json format
A : Any = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 537 | 1 |
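# A slightly more defensive variant of the fetch helper above, assuming the same
# endpoint; raise_for_status surfaces HTTP errors before JSON decoding, and the
# 10-second timeout is an illustrative choice.
def fetch_bbc_news_safe(bbc_news_api_key: str) -> None:
    response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
    response.raise_for_status()  # fail loudly on 4xx/5xx
    for i, article in enumerate(response.json()["articles"], 1):
        print(f"{i}.) {article['title']}")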
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
a_ :Optional[int] = logging.getLogger()
def get_setup_file( ) -> str:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results( output_dir ) -> dict:
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results
def is_cuda_and_apex_available( ) -> bool:
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
a_ :Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( TestCasePlus ):
@classmethod
def lowercase__ ( cls : int ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE__ : Any = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowercase__ ( cls : Any ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : str = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : int = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : List[Any] = get_results(_lowercase )
self.assertLess(result['''perplexity'''] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_results(_lowercase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
SCREAMING_SNAKE_CASE__ : str = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE__ : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : List[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Any = get_results(_lowercase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Any = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Tuple = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : List[Any] = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : str = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Any = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''translation_no_trainer''' ) ) )
@slow
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : int = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : List[str] = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_results(_lowercase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''image_classification_no_trainer''' ) ) )
| 35 |
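# Outside the test harness, the same launch pattern the suite builds in
# setUpClass can be reproduced with subprocess. The paths and arguments below
# are illustrative placeholders, not values taken from the tests.
import subprocess

cmd = [
    "accelerate", "launch", "--config_file", "default_config.yml",
    "examples/pytorch/text-classification/run_glue_no_trainer.py",
    "--model_name_or_path", "distilbert-base-uncased",
    "--output_dir", "out",
]
subprocess.run(cmd, check=True)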
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage( image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , depth_estimator , examples ):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
    def test_large_model_pt( self ):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 1 |
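# A minimal usage sketch of the pipeline exercised above, reusing the
# Intel/dpt-large checkpoint and COCO image URL from the slow test.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"].save("depth.png")  # a PIL image
print(outputs["predicted_depth"].shape)  # a torch tensor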
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCAmelCase = TypeVar('''KT''')
UpperCAmelCase = TypeVar('''VT''')
class Node ( Generic[KT, VT] ):
    '''simple docstring'''
    def __init__( self , key = "root" , value = None ):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__( self ):
        return F'''Node({self.key}: {self.value})'''
    @property
    def level( self ) -> int:
        return len(self.forward )
class SkipList ( Generic[KT, VT] ):
    '''simple docstring'''
    def __init__( self , p = 0.5 , max_level = 16 ):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self ):
        items = list(self )
        if len(items ) == 0:
            return F'''SkipList(level={self.level})'''
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'''[{node.key}]'''.ljust(label_size , '-' ) + '* ' * len(node.forward ) )
        lines.append(' ' * label_size + '| ' * len(node.forward ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                F'''[{node.key}]'''.ljust(label_size , '-' )
                + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
            lines.append(' ' * label_size + '| ' * len(node.forward ) )
            forwards = node.forward
        lines.append('None'.ljust(label_size ) + '* ' * len(node.forward ) )
        return F'''SkipList(level={self.level})\n''' + "\n".join(lines )
    def __iter__( self ):
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node( self , key ):
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete( self , key ):
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key , value ):
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self , key ):
        node , _ = self._locate_node(key )
        if node is not None:
            return node.value
        return None
def test_insert( ):
    skip_list = SkipList()
    skip_list.insert('Key1' , 3 )
    skip_list.insert('Key2' , 12 )
    skip_list.insert('Key3' , 41 )
    skip_list.insert('Key4' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value( ):
    skip_list = SkipList()
    skip_list.insert('Key1' , 10 )
    skip_list.insert('Key1' , 12 )
    skip_list.insert('Key5' , 7 )
    skip_list.insert('Key7' , 10 )
    skip_list.insert('Key10' , 5 )
    skip_list.insert('Key7' , 7 )
    skip_list.insert('Key5' , 5 )
    skip_list.insert('Key10' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none( ):
    skip_list = SkipList()
    assert skip_list.find('Some key' ) is None
def test_search( ):
    skip_list = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def test_deleting_item_from_empty_list_do_nothing( ):
    skip_list = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method( ):
    skip_list = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def test_delete_removes_only_given_key( ):
    skip_list = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def test_delete_doesnt_leave_dead_nodes( ):
    skip_list = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values( ):
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main( ):
    skip_list = SkipList()
    skip_list.insert(2 , '2' )
    skip_list.insert(4 , '4' )
    skip_list.insert(6 , '4' )
    skip_list.insert(4 , '5' )
    skip_list.insert(8 , '4' )
    skip_list.insert(9 , '4' )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 565 |
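# The random_level scheme above assigns level k with probability
# p**(k - 1) * (1 - p) (truncated at max_level), so the expected node level is
# roughly 1 / (1 - p), i.e. about 2 for the default p = 0.5. A quick empirical
# check of that claim:
from random import random

def sample_level(p: float = 0.5, max_level: int = 16) -> int:
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level

levels = [sample_level() for _ in range(100_000)]
print(sum(levels) / len(levels))  # ~2.0 for p = 0.5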
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key ( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('model.' , '' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2' , 'output.LayerNorm' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm' , 'LayerNorm' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('.' )[0].split('_' )[-1]
        orig_key = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn' , 'attention.self' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha' , 'attention' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q' , 'self.query' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k' , 'self.key' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v' , 'self.value' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1' , 'intermediate.dense' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2' , 'output.dense' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff' , 'output.dense' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm' , 'cls.predictions.transform' )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key
def convert_checkpoint_helper ( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint ( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 565 | 1 |
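# An invocation sketch matching the argparse definition above; the script file
# name and all paths are illustrative placeholders.
import subprocess

subprocess.run([
    "python", "convert_yoso_checkpoint.py",
    "--pytorch_model_path", "/path/to/yoso_checkpoint.bin",
    "--config_file", "/path/to/config.json",
    "--pytorch_dump_path", "/path/to/output_dir",
], check=True)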
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
SCREAMING_SNAKE_CASE : str = "true"
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any]=82 , SCREAMING_SNAKE_CASE_ : Dict=16 ):
"""simple docstring"""
set_seed(42 )
a_ : Tuple = RegressionModel()
a_ : Any = deepcopy(SCREAMING_SNAKE_CASE_ )
a_ : int = RegressionDataset(length=SCREAMING_SNAKE_CASE_ )
a_ : Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
model.to(accelerator.device )
a_ , a_ : int = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model, ddp_model, dataloader
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : Tuple=False ):
"""simple docstring"""
a_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
a_ : Any = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(SCREAMING_SNAKE_CASE_ : List[Any] ):
a_ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
with accelerator.main_process_first():
a_ : Any = dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
a_ : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=16 )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
a_ : int = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
a_ : int = get_dataloader(SCREAMING_SNAKE_CASE_ , not dispatch_batches )
a_ : List[str] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=SCREAMING_SNAKE_CASE_ )
a_ , a_ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
a_ : Optional[int] = []
for batch in dataloader:
a_ , a_ : Union[str, Any] = batch.values()
with torch.no_grad():
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE_ )
a_ , a_ : Optional[Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a_ , a_ : Any = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE_ )
targs.append(SCREAMING_SNAKE_CASE_ )
a_ , a_ : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_ ), torch.cat(SCREAMING_SNAKE_CASE_ )
return logits, targs
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : List[Any]=82 , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : List[str]=16 ):
"""simple docstring"""
a_ , a_ , a_ : str = get_basic_setup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a_ , a_ : int = generate_predictions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert (
len(SCREAMING_SNAKE_CASE_ ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE_ )}"""
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False ):
"""simple docstring"""
a_ : int = evaluate.load("""glue""" , """mrpc""" )
a_ , a_ : List[str] = get_mrpc_setup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# First do baseline
a_ , a_ , a_ : int = setup["""no"""]
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE_ )
with torch.inference_mode():
a_ : str = model(**SCREAMING_SNAKE_CASE_ )
a_ : int = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE_ , references=batch["""labels"""] )
a_ : str = metric.compute()
# Then do distributed
a_ , a_ , a_ : List[Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
a_ : int = model(**SCREAMING_SNAKE_CASE_ )
a_ : int = outputs.logits.argmax(dim=-1 )
a_ : int = batch["""labels"""]
a_ , a_ : str = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ )
a_ : Optional[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : List[str] = Accelerator(split_batches=SCREAMING_SNAKE_CASE_ , dispatch_batches=SCREAMING_SNAKE_CASE_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a_ : List[Any] = Accelerator(split_batches=SCREAMING_SNAKE_CASE_ , dispatch_batches=SCREAMING_SNAKE_CASE_ )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(SCREAMING_SNAKE_CASE_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
a_ : Optional[Any] = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE_ , 5_12 )
accelerator.state._reset_state()
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 419 |
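# The essential gather_for_metrics pattern from the script above, reduced to its
# core loop. `model`, `dataloader`, `accelerator`, and `metric` are assumed to be
# prepared as in get_mrpc_setup; gather_for_metrics also trims the duplicated
# samples that padding of the last batch would otherwise introduce.
model.eval()
for batch in dataloader:
    with torch.inference_mode():
        logits = model(**batch).logits
    preds = logits.argmax(dim=-1)
    preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
    metric.add_batch(predictions=preds, references=refs)
result = metric.compute()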
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = "▁"
SCREAMING_SNAKE_CASE : str = {"vocab_file": "sentencepiece.bpe.model"}
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
SCREAMING_SNAKE_CASE : Dict = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class XLMRobertaTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self) -> Dict[str, Any]:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence or a pair of sequences by adding special tokens."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Retrieve a mask marking the special tokens added by build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """XLM-RoBERTa does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
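    # Note (added): SentencePiece marks word boundaries with "\u2581" (SPIECE_UNDERLINE);
    # replacing it with a plain space and stripping reconstructs the detokenized text.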
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
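

# Illustration (added, not part of the original file): how the fairseq offset
# remaps SentencePiece ids. spm id 3 (",") becomes fairseq id 4, matching the
# alignment table in __init__; spm's unknown id 0 must fall back to fairseq's
# <unk> (id 3), which is exactly what _convert_token_to_id does above.
def _fairseq_aligned_id(spm_id, fairseq_offset=1, unk_token_id=3):
    return spm_id + fairseq_offset if spm_id else unk_token_id


assert _fairseq_aligned_id(3) == 4  # ",": spm position 3 -> fairseq position 4
assert _fairseq_aligned_id(0) == 3  # unknown piece -> <unk>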
| 419 | 1 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
    print(f"{solution() = }")
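    # Sanity check (added): 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
    assert solution(13_195) == 29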
| 598 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
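    # Worked example (added): with the toy merges above, " lower" byte-level encodes
    # to "\u0120lower" and the merges apply greedily ("\u0120 l", then "\u0120l o",
    # then "\u0120lo w", plus "e r"), yielding ["\u0120low", "er"]; " newer" only gets
    # the "e r" merge, hence the ["\u0120", "n", "e", "w", "er"] tail in bpe_tokens.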
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level encoding
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",)
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",)
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,)
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 598 | 1 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # fix (added): the distance from a node to itself is 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
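# Note (added): show_min returns the distance rather than printing it. With the
# edges above, the shortest 1 -> 4 path is 1 -> 3 -> 4 (5 + 6 = 11) and the
# shortest 0 -> 3 path is 0 -> 2 -> 3 (9 + 7 = 16).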
| 594 |
'''simple docstring'''
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
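

# Non-interactive example (added): quick_sort_random sorts in place over [left, right).
_demo = [3, 1, 4, 1, 5, 9, 2, 6]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 1, 2, 3, 4, 5, 6, 9]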
if __name__ == "__main__":
    main()
| 634 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.", FutureWarning,)
        super().__init__(*args, **kwargs)
| 718 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
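# Note (added): the original checkpoint stores q, k and v stacked in a single
# (3 * hidden_size) x hidden_size in_proj matrix with hidden_size = 256, so rows
# [0:256] hold the query projection, [256:512] the key and [-256:] the value.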
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet statistics
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,)
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 220 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
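# Usage sketch (added; the checkpoint above is downloaded on first use, and the
# image path is hypothetical):
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(image=Image.open("photo.jpg"), question="What is in the picture?"))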
| 297 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,)
return out
| 297 | 1 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 414 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
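

# Behavior sketch (added; this is NOT datasets' actual implementation, only a
# function consistent with the parametrized expectations above): shards are split
# into at most max_num_jobs contiguous ranges, the remainder going to the first jobs.
def _distribute_shards_sketch(num_shards, max_num_jobs):
    jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for rank in range(jobs):
        size = num_shards // jobs + (1 if rank < num_shards % jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out


assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]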
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 414 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
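    # Note (added): guidance_scale = 4.0 (> 1) enables classifier-free guidance, so
    # the unet runs on a doubled batch of conditional + unconditional image embeds.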
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy")
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np",)
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 235 |
def check_cycle(graph: dict) -> bool:
    """Return True if the given directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False
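

# Examples (added): the first graph has a back edge 2 -> 0 and therefore a cycle;
# removing that edge makes it acyclic.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False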
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 235 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 211 |
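# A minimal, self-contained illustration of the two state-dict surgeries the
# conversion script above relies on: renaming keys via pop/insert, and slicing a
# fused qkv projection into separate query/key/value tensors. The tensor sizes
# and key names here are toy values, not the real TrOCR checkpoint layout.
import torch

hidden = 4
state = {
    "blocks.0.attn.qkv.weight": torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden),
    "blocks.0.norm1.weight": torch.ones(hidden),
}

# rename_key-style move: pop the old entry and re-insert it under the new name
val = state.pop("blocks.0.norm1.weight")
state["layer.0.layernorm_before.weight"] = val

# read_in_q_k_v-style split: the fused (3*hidden, hidden) matrix is stacked q/k/v
qkv = state.pop("blocks.0.attn.qkv.weight")
state["layer.0.attention.query.weight"] = qkv[:hidden, :]
state["layer.0.attention.key.weight"] = qkv[hidden : 2 * hidden, :]
state["layer.0.attention.value.weight"] = qkv[-hidden:, :]

assert state["layer.0.attention.value.weight"].shape == (hidden, hidden)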
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 1 |
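# The __init__ above uses transformers' private _LazyModule helper. The same
# effect can be had in plain Python via PEP 562 module-level __getattr__; this
# is a generic sketch of the idea, not transformers' actual implementation.
import importlib

_lazy_imports = {"json": ["dumps", "loads"]}  # submodule -> public names


def __getattr__(name):
    for module_name, symbols in _lazy_imports.items():
        if name in symbols:
            # Import the submodule only on first attribute access.
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")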
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True,
        dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 439 |
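# Quick sanity check of the config class above: `attribute_map` makes the
# generic names resolve to the DETR-style fields. This sketch assumes an
# installed transformers release that ships TableTransformerConfig.
from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
assert config.hidden_size == 128          # mapped to d_model
assert config.num_attention_heads == 4    # mapped to encoder_attention_heads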
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

# Conversion factors relative to the cubic metre: `from_` converts the unit
# into cubic metres, `to` converts cubic metres back into the unit.
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    # Function name reconstructed; nothing in the record calls it by name.
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
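# Usage sketch for the converter above: every unit is routed through cubic
# metres (`from_` converts into m^3, `to` converts back out), so a cup-to-litre
# conversion is 4 * 0.000236588 * 1000.
print(volume_conversion(4, "cup", "litre"))  # ~0.946352
try:
    volume_conversion(1, "pint", "litre")
except ValueError as err:
    print(err)  # lists the supported unit names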
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True,
        scope=None, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
SCREAMING_SNAKE_CASE = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 327 |
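# A tiny end-to-end sketch of what the tester above exercises: build a small
# random-weight XLM and run one forward pass. The config values mirror the
# tester's miniature settings; no pretrained download is needed.
import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4, max_position_embeddings=512)
model = XLMModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 7))
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state
assert hidden.shape == (1, 7, 32)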
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02,
        attention_type="divided_space_time", scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_frames=self.num_frames, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range, attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 327 | 1 |
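# The same idea at integration-test scale, but with a random-weight model so it
# runs offline: a (batch, frames, channels, height, width) clip through a tiny
# TimeSformer. The config values below are arbitrary small numbers.
import torch
from transformers import TimesformerConfig, TimesformerModel

config = TimesformerConfig(image_size=16, patch_size=8, num_frames=2, hidden_size=32,
                           num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = TimesformerModel(config).eval()
clip = torch.randn(1, 2, 3, 16, 16)
with torch.no_grad():
    out = model(pixel_values=clip).last_hidden_state
# seq_length = num_frames * (image_size // patch_size) ** 2 + 1 = 2 * 4 + 1 = 9
assert out.shape == (1, 9, 32)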
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the constructed distributions."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of `event_shape`."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the support of the distribution, usable e.g. for padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert raw projections to the right shape and domain of each parameter."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # The affine transform cannot be used here since the negative binomial must
    # return integers; instead the parameters themselves are scaled.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 150 |
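# Usage sketch for the distribution-output machinery above: a StudentTOutput
# head projects network features to (df, loc, scale), and the resulting
# distribution supports sampling and log_prob. Only torch plus the classes
# defined in the record above are needed.
import torch

student_t = StudentTOutput(dim=1)
proj = student_t.get_parameter_projection(in_features=8)
features = torch.randn(2, 8)            # e.g. decoder outputs for 2 series
distr_args = proj(features)             # tuple (df, loc, scale), each of shape (2,)
distr = student_t.distribution(distr_args, loc=torch.zeros(2), scale=torch.ones(2))
print(distr.sample().shape)             # torch.Size([2])
print(distr.log_prob(torch.zeros(2)))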
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 150 | 1 |
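# A self-contained sketch of the aggregation done in `_eval_end` above: stack
# the per-batch losses, argmax the concatenated logits, and compare to the
# targets. The metric is simplified to plain accuracy instead of glue metrics.
import numpy as np
import torch

outputs = [
    {"val_loss": torch.tensor(0.7), "pred": np.array([[2.0, 0.1], [0.2, 1.5]]), "target": np.array([0, 1])},
    {"val_loss": torch.tensor(0.5), "pred": np.array([[0.3, 0.9]]), "target": np.array([0])},
]
val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().item()
preds = np.argmax(np.concatenate([x["pred"] for x in outputs], axis=0), axis=1)
targets = np.concatenate([x["target"] for x in outputs], axis=0)
print({"val_loss": val_loss_mean, "acc": float((preds == targets).mean())})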
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
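# Illustrative sketch (hedged; not part of the original module). The lazy
# mappings above resolve a config class to its Flax model class on first
# access; "bert" below is just an example model type.
#     from transformers import AutoConfig
#     config = AutoConfig.for_model("bert")
#     FLAX_MODEL_MAPPING[type(config)]   # -> FlaxBertModel, imported lazily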
| 710 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # compute each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # rotate coordinates: get kernel x
            _x = cos_theta * px + sin_theta * py
            # rotate coordinates: get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: Gaussian envelope times a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn the image into gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple kernels to detect edges in several orientations
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
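    # Illustrative sanity check (hedged sketch, added for demonstration):
    # an even ksize is bumped to the next odd value, and for psi = 0 the
    # kernel center equals exp(0) * cos(0) = 1, the maximum.
    k = gabor_filter_kernel(4, 8, 0, 10, 0, 0)
    assert k.shape == (5, 5)
    assert k[2, 2] == k.max()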
| 311 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
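# Note (hedged): unlike the torch pipelines, the ONNX pipeline above takes a
# np.random.RandomState as `generator`, which is what makes the generated
# images deterministic enough to pin exact output slices in these tests.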
| 673 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
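# Illustrative use of the box helpers above (hedged sketch; boxes are
# (x0, y0, x1, y1) and scale_yx comes from Preprocess.__call__):
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
    boxes = _scale_box(boxes, torch.tensor([[0.5, 0.5]]))  # halve both axes
    _clip_box(boxes, (100, 100))  # clamp in place to a 100x100 image
    print(boxes)  # expected: tensor([[  5.,  10.,  55., 100.]])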
| 673 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ : Tuple = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Dict = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
lowercase_ : List[str] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
lowercase_ : str = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
lowercase_ : int = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
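# Note (hedged): _LazyModule installs itself in sys.modules, so the heavy
# torch/TF imports registered in _import_structure above only execute on first
# attribute access, e.g.:
#     from transformers.models.data2vec import Data2VecAudioModel  # triggers the real import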
| 295 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : str, lowerCamelCase__ : str ) -> Union[str, Any]:
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ), end="\t" )
else:
print("INF", end="\t" )
print()
def _lowerCAmelCase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Any ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = [[float("inf" ) for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )]
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(lowerCamelCase__ ):
# looping through rows of graph array
for i in range(lowerCamelCase__ ):
# looping through columns of graph array
for j in range(lowerCamelCase__ ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_SCREAMING_SNAKE_CASE : List[Any] = dist[i][k] + dist[k][j]
_print_dist(lowerCamelCase__, lowerCamelCase__ )
return dist, v
if __name__ == "__main__":
lowercase_ : Tuple = int(input('''Enter number of vertices: '''))
lowercase_ : List[Any] = int(input('''Enter number of edges: '''))
lowercase_ : Optional[Any] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
lowercase_ : Tuple = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowercase_ : str = int(input('''Enter source:'''))
lowercase_ : Optional[Any] = int(input('''Enter destination:'''))
lowercase_ : Union[str, Any] = float(input('''Enter weight:'''))
lowercase_ : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
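# Illustrative, non-interactive use of floyd_warshall above (hedged sketch):
#     INF = float("inf")
#     graph = [[0, 2, INF], [INF, 0, 3], [INF, INF, 0]]
#     floyd_warshall(graph, 3)  # dist[0][2] relaxes to 2 + 3 = 5 via vertex 1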
| 295 | 1 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    >>> capitalize("hello world")
    'Hello world'
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 295 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(pt_outputs_loaded), len(fx_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 295 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a__ : List[str] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a__ : List[Any] = typing.Union[np.floataa, int, float] # noqa: UP007
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(__lowerCamelCase ) - np.asarray(__lowerCamelCase )) ** 2 ) )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(__lowerCamelCase, __lowerCamelCase ) ) ** (1 / 2)
if __name__ == "__main__":
def A__ ( ):
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])', number=1_0_0_0_0, globals=globals(), ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])', number=1_0_0_0_0, globals=globals(), ) )
benchmark()
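    # Worked check (hedged sketch): both implementations agree; for
    # (1, 2, 3) vs (4, 5, 6) the distance is sqrt(3**2 + 3**2 + 3**2) = sqrt(27).
    assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - euclidean_distance([1, 2, 3], [4, 5, 6])) < 1e-9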
| 309 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : str = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : Optional[int] = "mvp"
UpperCamelCase : Optional[Any] = ["past_key_values"]
UpperCamelCase : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __magic_name__=5_0_2_6_7 , __magic_name__=1_0_2_4 , __magic_name__=1_2 , __magic_name__=4_0_9_6 , __magic_name__=1_6 , __magic_name__=1_2 , __magic_name__=4_0_9_6 , __magic_name__=1_6 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__="gelu" , __magic_name__=1_0_2_4 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=0.0 , __magic_name__=False , __magic_name__=True , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=True , __magic_name__=2 , __magic_name__=2 , __magic_name__=False , __magic_name__=1_0_0 , __magic_name__=8_0_0 , **__magic_name__ , ):
"""simple docstring"""
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase = use_prompt
_lowerCAmelCase = prompt_length
_lowerCAmelCase = prompt_mid_dim
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , __magic_name__ ):
_lowerCAmelCase = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
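# Illustrative usage (hedged; this module uses relative imports, so import it
# through the installed transformers package rather than running it standalone):
#     from transformers import MvpConfig
#     config = MvpConfig()
#     (config.model_type, config.d_model, config.encoder_attention_heads)  # ('mvp', 1024, 16)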
| 309 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Union[str, Any] = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = 'xglm'
A = ['past_key_values']
A = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
def __init__( self :Optional[Any] , lowerCamelCase_ :List[Any]=2_5_6_0_0_8 , lowerCamelCase_ :int=2_0_4_8 , lowerCamelCase_ :Optional[int]=1_0_2_4 , lowerCamelCase_ :List[str]=4_0_9_6 , lowerCamelCase_ :int=2_4 , lowerCamelCase_ :str=1_6 , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Tuple=0.02 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Any=2 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Tuple=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :List[Any] , ) -> str:
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = d_model
UpperCamelCase__ = ffn_dim
UpperCamelCase__ = num_layers
UpperCamelCase__ = attention_heads
UpperCamelCase__ = activation_function
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = init_std
UpperCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase__ = use_cache
super().__init__(
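# Illustrative note (hedged): attribute_map above aliases the generic config
# attribute names onto XGLM's own, so both spellings read the same value:
#     config = XGLMConfig()
#     config.num_attention_heads == config.attention_heads  # True (both 16)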
| 516 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 516 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn
        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 207 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """
    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
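# Note (hedged): with k = max - min + 1 buckets the sort runs in O(n + k)
# expected time, and sorted(bucket) keeps it correct for floats as well:
#     bucket_sort([0.4, 1.2, 0.1])  # -> [0.1, 0.4, 1.2]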
| 207 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 232 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"
    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
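# Note (hedged): BeitOnnxConfig above declares a single ONNX input,
# "pixel_values", whose four axes (batch, num_channels, height, width) are all
# exported as named dynamic axes, with a validation tolerance of 1e-4.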
| 232 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse unknown arguments into a dict of '--key value' pairs."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    main()
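# Illustrative CLI usage (hedged; subcommand names follow the registrations
# above, the flags are assumptions):
#     datasets-cli env
#     datasets-cli test ./path/to/dataset --save_infos --all_configs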
| 271 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
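# A minimal sketch of driving the same plugin outside the tests. The plugin reads its
# settings from the FSDP_* environment variables exercised above; the exact values set
# below are illustrative assumptions, not a recommended configuration.
def _example_fsdp_from_env():
    os.environ["ACCELERATE_USE_FSDP"] = "true"
    os.environ["FSDP_SHARDING_STRATEGY"] = "1"  # 1 == FULL_SHARD
    os.environ["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
    os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
    fsdp_plugin = FullyShardedDataParallelPlugin()
    return Accelerator(fsdp_plugin=fsdp_plugin)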
| 271 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
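

# A minimal sketch of the lazy-import pattern used above: the module object in
# sys.modules is swapped for a proxy that imports a submodule only on first attribute
# access. Names below are illustrative assumptions; transformers' real helper is
# `_LazyModule`.
import importlib
import types


class _LazyProxySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # import the owning submodule lazily, then fetch the attribute from it
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)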
| 99 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
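

# Worked example for create_token_type_ids_from_sequences above: for a pair of
# sequences tokenized to ids [7592] and [2088] (values are illustrative), the layout
# is [CLS] 7592 [SEP] 2088 [SEP] and the returned segment ids are [0, 0, 0, 1, 1] --
# zeros cover cls + token_ids_0 + sep, ones cover token_ids_1 + sep.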
| 99 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,              # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check the named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
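

# A self-contained sketch of the no-decay parameter grouping used in
# BaseTransformer.configure_optimizers above. The tiny model and the hyperparameter
# values are placeholder assumptions, chosen only to make the grouping visible.
def _example_weight_decay_groups():
    import torch
    from torch import nn

    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.dense = nn.Linear(4, 4)
            self.LayerNorm = nn.LayerNorm(4)

    model = TinyModel()
    no_decay = ["bias", "LayerNorm.weight"]
    grouped_parameters = [
        {
            # decayed: dense.weight (name matches neither "bias" nor "LayerNorm.weight")
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,
        },
        {
            # not decayed: all biases and the LayerNorm scale
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    return torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)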
| 655 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - the dictionary which stores the index of the component
            which a node belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Propagates a new component throughout a given component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Finds the component index of a given node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Union finds the roots of components for two nodes, compares the components
        in terms of size, and attaches the smaller one to the larger one to form a
        single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the MST."""
        # Initialize additional lists required by the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
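
# A usage sketch for the Graph class above (edge weights chosen arbitrarily);
# Borůvka keeps edges (0-3), (1-2) and (2-3), for a total MST weight of 15:
if __name__ == "__main__":
    g = Graph(4)
    g.add_edge(0, 1, 10)
    g.add_edge(1, 2, 6)
    g.add_edge(2, 3, 5)
    g.add_edge(0, 3, 4)
    g.boruvka()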
| 655 | 1 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """
    Class to crawl Instagram user information
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"{instagram_user.number_of_posts = }")
print(F"{instagram_user.number_of_followers = }")
print(F"{instagram_user.number_of_followings = }")
print(F"{instagram_user.email = }")
print(F"{instagram_user.website = }")
print(F"{instagram_user.profile_picture_url = }")
print(F"{instagram_user.is_verified = }")
print(F"{instagram_user.is_private = }")
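
# extract_user_profile above slices the shared-data JSON out of an inline <script>
# tag; a minimal offline illustration with a fabricated payload:
if __name__ == "__main__":
    payload = 'window._sharedData = {"config": {}, "entry_data": {}};'
    start = payload.find('{"config"')
    print(payload[start:-1])  # {"config": {}, "entry_data": {}}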
| 452 |
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encodes the given word"""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes the given coded word"""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
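
# Round-trip example for the cipher above: each letter maps to a fixed 5-symbol
# A/B group, and decode() consumes the coded text in 5-symbol chunks.
if __name__ == "__main__":
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode(encode("hello world")) == "hello world"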
| 452 | 1 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 706 |
"""simple docstring"""
import numpy as np
class Cell:
    """
    Class cell represents a cell in the world which has the properties:
    position: represented by a tuple of x and y coordinates, initially (0, 0).
    parent: contains the parent cell object visited before we arrived at this cell.
    g, h, f: parameters used by the heuristic function.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Overrides equality so that cells compare by position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """
    Gridworld class represents the external world: a grid M*M matrix.
    world_size: creates a numpy array with the given world_size, default (5, 5).
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the neighbours of cell.
        """
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """
    Implementation of the A* algorithm.
    world : the Gridworld object.
    start : the Cell used as the start position.
    goal  : the Cell used as the goal position.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neighbours(current):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            # skip n if a copy with a lower f-score is already queued
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()

    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)

    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)

    # Just for visual reasons.
    for i in s:
        world.w[i] = 1

    print(world.w)
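

# Note that the heuristic above is the *squared* Euclidean distance, which can heavily
# overestimate the remaining cost on a unit-cost grid, so the search behaves greedily
# rather than as admissible A*. A sketch of a Chebyshev-distance alternative, which is
# admissible for 8-connected unit-cost moves:
def chebyshev(cell, goal):
    (x1, y1), (x2, y2) = cell.position, goal.position
    return max(abs(x2 - x1), abs(y2 - y1))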
| 117 | 0 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1}")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string: it must be a string and have an even length
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Creates the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (reverse order)
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could also be raised:
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
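
# The reflector makes the whole transformation an involution, so running the machine
# twice with identical settings restores the plaintext; a quick check (the rotor and
# plugboard choices here are arbitrary):
if __name__ == "__main__":
    ct = enigma("HELLO WORLD", (1, 1, 1), (rotor2, rotor4, rotor8), "AQ")
    assert enigma(ct, (1, 1, 1), (rotor2, rotor4, rotor8), "AQ") == "HELLO WORLD"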
| 324 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between two
    points on the surface of earth, given longitudes and latitudes:
    https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
    """
    # Equation parameters (WGS84 constants above, distance in metres)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
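
# Quick sanity check (coordinates for San Francisco and Yosemite; the expected
# distance is roughly 254 km):
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))  # metres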
| 87 | 0 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
        return T5Config(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVision2Seq

    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
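# A minimal usage sketch (commented out because instantiating the tool downloads
# the BLIP checkpoint; the image path is a hypothetical placeholder and the call
# assumes the standard PipelineTool protocol of encode -> forward -> decode):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   print(tool(Image.open('photo.png')))  # -> an English caption string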
| 84 | 1 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the largest product of thirteen adjacent digits in the string `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
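# A sketch of an incremental alternative (an addition, not part of the original
# solution): slide a window of 13 digits, dividing out the digit that leaves and
# multiplying in the digit that enters. Zeros would break the division, so the
# window tracks a zero count and only reports a product when it contains none.
def solution_sliding(n: str = N, width: int = 13) -> int:
    largest = 0
    product = 1  # product of the non-zero digits currently in the window
    zeros = 0  # how many zeros the window currently contains
    for i, ch in enumerate(n):
        digit = int(ch)
        if digit == 0:
            zeros += 1
        else:
            product *= digit
        if i >= width:  # drop the digit that just left the window
            old = int(n[i - width])
            if old == 0:
                zeros -= 1
            else:
                product //= old
        if i >= width - 1 and zeros == 0:
            largest = max(largest, product)
    return largest
    # (optional sanity check: assert solution_sliding() == solution())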
if __name__ == "__main__":
print(F'{solution() = }')
| 100 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
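# A minimal sketch of how these guarded exports are typically consumed downstream
# (swapping a pipeline's scheduler; the checkpoint id is a hypothetical placeholder
# and the snippet assumes torch is installed):
#
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained('some-org/some-checkpoint')  # hypothetical
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)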
| 100 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}." )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                ' process.' )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
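# A minimal usage sketch (commented out: it downloads a checkpoint and is compute
# heavy; the model id is one public Dance Diffusion checkpoint, shown as an example):
#
#   pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)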
| 702 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 443 | 0 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """Greedy best-first search on a grid; returns the found path and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print('ACTION MAP')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 486 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class A ( PretrainedConfig ):
    model_type = 'open-llama'
    def __init__(
        self, vocab_size=100_000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32,
        num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02,
        rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True,
        rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below is preserved from the original configuration
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
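# A minimal sketch of the validation above (values are illustrative; `A` is the
# config class defined in this file):
#
#   A(rope_scaling={'type': 'linear', 'factor': 2.0})  # passes validation
#   A(rope_scaling={'type': 'linear', 'factor': 0.5})  # raises: factor must be > 1
#   A(rope_scaling={'kind': 'linear'})                 # raises: wrong fields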
| 486 | 1 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive file lock so multi-process output doesn't interleave."""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()

gpu = f'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(f'{gpu} is broken')
raise
| 434 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] =DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
def _lowerCamelCase ( self : int ):
pass
def _lowerCamelCase ( self : Optional[Any] ):
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
__UpperCamelCase = image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : Tuple ):
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(__A , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : Dict ):
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(__A , return_tensors='pt' ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCamelCase ( self : Dict ):
# prepare image and target
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__UpperCamelCase = json.loads(f.read() )
__UpperCamelCase = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
__UpperCamelCase = DeformableDetrImageProcessor()
__UpperCamelCase = image_processing(images=__A , annotations=__A , return_tensors='pt' )
# verify pixel values
__UpperCamelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , __A )
__UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__UpperCamelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __A ) )
# verify boxes
__UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __A )
__UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __A , atol=1e-3 ) )
# verify image_id
__UpperCamelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __A ) )
# verify is_crowd
__UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __A ) )
# verify class_labels
__UpperCamelCase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __A ) )
# verify orig_size
__UpperCamelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __A ) )
# verify size
__UpperCamelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __A ) )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
# prepare image, target and masks_path
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__UpperCamelCase = json.loads(f.read() )
__UpperCamelCase = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
__UpperCamelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__UpperCamelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
__UpperCamelCase = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='pt' )
# verify pixel values
__UpperCamelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , __A )
__UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__UpperCamelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __A ) )
# verify boxes
__UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __A )
__UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __A , atol=1e-3 ) )
# verify image_id
__UpperCamelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __A ) )
# verify is_crowd
__UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __A ) )
# verify class_labels
__UpperCamelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __A ) )
# verify masks
__UpperCamelCase = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __A )
# verify orig_size
__UpperCamelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __A ) )
# verify size
__UpperCamelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __A ) )
| 434 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__snake_case = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
__snake_case = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        __snake_case = UNet2DConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_unet
__snake_case = self.dummy_movq
__snake_case = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__snake_case = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
__snake_case = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> List[str]:
'''simple docstring'''
__snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
# create init_image
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __snake_case = Image.fromarray(np.uint8(__SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((256, 256) )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = '''cpu'''
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__snake_case = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__snake_case = output.images
__snake_case = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 24 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
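# A minimal usage sketch (assumes a local PySpark session; the example rows are
# illustrative):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master('local[*]').getOrCreate()
#   df = spark.createDataFrame([{'text': 'hello'}, {'text': 'world'}])
#   ds = SparkDatasetReader(df, streaming=False, cache_dir='./cache').read()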
| 24 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list, or None if the snippet is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset, in two steps: MinHash computation and LSH lookup."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets over their token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
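# Worked example: "def add(a, b)" and "def add(x, y)" tokenize to
# {'def', 'add', 'a', 'b'} and {'def', 'add', 'x', 'y'}; the intersection holds
# 2 tokens and the union 6, so jaccard_similarity returns 2 / 6 ≈ 0.33.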
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate the dataset, keeping one "extreme" representative per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')

    return ds_filter, duplicate_clusters
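# A minimal usage sketch (the dataset id is a hypothetical placeholder; the dataset
# must expose 'content', 'repo_name' and 'path' columns, as assumed above):
#
#   from datasets import load_dataset
#   ds = load_dataset('some-org/some-code-corpus', split='train')  # hypothetical
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   print(len(ds), '->', len(ds_dedup))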
| 712 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
"""simple docstring"""
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
"""simple docstring"""
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640,
        num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1,
        initializer_range=0.02, use_labels=True, is_training=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
def _UpperCamelCase ( self : str , a_ : Tuple , a_ : int , a_ : str , a_ : Tuple ):
"""simple docstring"""
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MobileViTForImageClassification(a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Optional[Any] , a_ : List[Any] , a_ : str , a_ : List[str] , a_ : int ):
"""simple docstring"""
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MobileViTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
lowerCamelCase__ = model(a_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
lowerCamelCase__ = MobileViTModelTester(self )
lowerCamelCase__ = MobileViTConfigTester(self , config_class=a_ , has_text_modality=a_ )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(a_ )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
def check_hidden_states_output(a_ : Any , a_ : List[str] , a_ : str ):
lowerCamelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(a_ , a_ ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = 5
self.assertEqual(len(a_ ) , a_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
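# With the tester defaults (image_size=32, output_stride=32) the five stages have
# spatial sizes 16, 8, 4, 2 and 1; the loop below doubles `divisor` each stage, so
# it ends at 64 and `divisor // 2` recovers output_stride == 32.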
lowerCamelCase__ = 2
for i in range(len(a_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(a_ , a_ , a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
@slow
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = MobileViTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img ():
'''simple docstring'''
lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(a_ )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
# verify the logits
lowerCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
lowerCamelCase__ = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = model.to(a_ )
lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
lowerCamelCase__ = outputs.logits
# verify the logits
lowerCamelCase__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , a_ )
lowerCamelCase__ = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=a_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = model.to(a_ )
lowerCamelCase__ = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=a_ , return_tensors="""pt""" ).to(a_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**a_ )
lowerCamelCase__ = outputs.logits.detach().cpu()
lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(50, 60)] )
lowerCamelCase__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , a_ )
lowerCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=a_ )
lowerCamelCase__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , a_ )
| 235 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
a__ : List[Any] =logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
"""simple docstring"""
def __init__( self : int , *args : Any , **kwargs : List[str] ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs )
| 399 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def _lowerCamelCase ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def _lowerCamelCase ( self : List[Any] , __A : Union[str, Any] , __A : str , __A : List[str]=None , __A : List[str]=True , __A : Optional[Any]=False ):
if rouge_types is None:
__UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
__UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=__A , use_stemmer=__A )
if use_aggregator:
__UpperCamelCase = scoring.BootstrapAggregator()
else:
__UpperCamelCase = []
for ref, pred in zip(__A , __A ):
__UpperCamelCase = scorer.score(__A , __A )
if use_aggregator:
aggregator.add_scores(__A )
else:
scores.append(__A )
if use_aggregator:
__UpperCamelCase = aggregator.aggregate()
else:
__UpperCamelCase = {}
for key in scores[0]:
__UpperCamelCase = [score[key] for score in scores]
return result
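# A hedged usage sketch of the underlying scorer this metric wraps (assumes the
# `rouge_score` package is installed; kept as comments so module import behaviour
# is unchanged):
# from rouge_score import rouge_scorer
# scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
# score = scorer.score("the cat sat", "the cat sat")  # signature is (target, prediction)
# print(score["rouge1"].fmeasure)  # 1.0 for identical strings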
| 399 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCamelCase_ = get_logger(__name__)
UpperCamelCase_ = R'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class _snake_case :
'''simple docstring'''
@add_start_docstrings(lowerCamelCase_ )
def __call__( self: str ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case :
'''simple docstring'''
@add_start_docstrings(lowerCamelCase_ )
def __call__( self: List[Any] ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case ( __snake_case ):
'''simple docstring'''
@add_start_docstrings(lowerCamelCase_ )
def __call__( self: Optional[int] ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ,**lowerCamelCase_: Optional[Any] ) -> jnp.ndarray:
for processor in self:
UpperCAmelCase_ : Dict = inspect.signature(processor.__call__ ).parameters
if len(lowerCamelCase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCAmelCase_ : List[str] = processor(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ )
else:
UpperCAmelCase_ : Union[str, Any] = processor(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: int ,lowerCamelCase_: float ) -> Optional[int]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCAmelCase_ : str = temperature
def __call__( self: int ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
UpperCAmelCase_ : int = scores / self.temperature
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: float ,lowerCamelCase_: float = -float("""Inf""" ) ,lowerCamelCase_: int = 1 ) -> Union[str, Any]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCAmelCase_ : Optional[int] = top_p
UpperCAmelCase_ : Optional[int] = filter_value
UpperCAmelCase_ : Optional[Any] = min_tokens_to_keep
def __call__( self: str ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = lax.top_k(lowerCamelCase_ ,scores.shape[-1] )
UpperCAmelCase_ : List[Any] = jnp.full_like(lowerCamelCase_ ,self.filter_value )
UpperCAmelCase_ : Tuple = jax.nn.softmax(lowerCamelCase_ ,axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase_ : Any = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase_ : str = jnp.roll(lowerCamelCase_ ,1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase_ )
# min tokens to keep
UpperCAmelCase_ : List[str] = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase_ )
UpperCAmelCase_ : int = jnp.where(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = jax.lax.sort_key_val(lowerCamelCase_ ,lowerCamelCase_ )[-1]
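# sort_key_val re-sorts by token index, scattering the filtered scores back to their original vocabulary positions.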
return next_scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: int ,lowerCamelCase_: float = -float("""Inf""" ) ,lowerCamelCase_: int = 1 ) -> Optional[Any]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCAmelCase_ : Any = max(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : str = filter_value
def __call__( self: Tuple ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
UpperCAmelCase_ , UpperCAmelCase_ : str = scores.shape
UpperCAmelCase_ : int = jnp.full(batch_size * vocab_size ,self.filter_value )
UpperCAmelCase_ : Dict = min(self.top_k ,scores.shape[-1] ) # Safety check
UpperCAmelCase_ , UpperCAmelCase_ : Any = lax.top_k(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : str = jnp.broadcast_to((jnp.arange(lowerCamelCase_ ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
UpperCAmelCase_ : str = topk_scores.flatten()
UpperCAmelCase_ : Optional[Any] = topk_indices.flatten() + shift
UpperCAmelCase_ : str = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase_ )
UpperCAmelCase_ : str = next_scores_flat.reshape(lowerCamelCase_ ,lowerCamelCase_ )
return next_scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: int ,lowerCamelCase_: int ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = bos_token_id
def __call__( self: str ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
UpperCAmelCase_ : str = jnp.full(scores.shape ,-float("""inf""" ) )
UpperCAmelCase_ : List[Any] = 1 - jnp.bool_(cur_len - 1 )
UpperCAmelCase_ : Any = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.bos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Union[str, Any] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> List[str]:
UpperCAmelCase_ : Tuple = max_length
UpperCAmelCase_ : List[Any] = eos_token_id
def __call__( self: Tuple ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
UpperCAmelCase_ : Union[str, Any] = jnp.full(scores.shape ,-float("""inf""" ) )
UpperCAmelCase_ : Tuple = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCAmelCase_ : str = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.eos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: int ,lowerCamelCase_: int ) -> List[Any]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCAmelCase_ : Optional[int] = min_length
UpperCAmelCase_ : Optional[Any] = eos_token_id
def __call__( self: List[str] ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
UpperCAmelCase_ : List[str] = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
UpperCAmelCase_ : List[Any] = jnp.where(lowerCamelCase_ ,scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Dict ,lowerCamelCase_: List[str] ,lowerCamelCase_: Union[str, Any] ) -> str:
UpperCAmelCase_ : Optional[Any] = list(lowerCamelCase_ )
UpperCAmelCase_ : int = begin_index
def __call__( self: Dict ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ) -> List[str]:
UpperCAmelCase_ : Dict = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCAmelCase_ : Optional[Any] = jnp.where(lowerCamelCase_ ,scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: List[str] ,lowerCamelCase_: list ) -> Tuple:
UpperCAmelCase_ : int = list(lowerCamelCase_ )
def __call__( self: str ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
UpperCAmelCase_ : Optional[int] = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: Optional[int] ) -> str:
UpperCAmelCase_ : List[Any] = dict(lowerCamelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
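# Hypothetical example: force_token_map = {1: 50362, 3: 50257} becomes the array
# [-1, 50362, -1, 50257], i.e. position i holds the token forced at generation
# step i, and -1 marks steps where nothing is forced.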
UpperCAmelCase_ : Dict = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCAmelCase_ : Any = force_token_array.at[index].set(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = jnp.intaa(lowerCamelCase_ )
def __call__( self: List[Any] ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: jnp.ndarray ,lowerCamelCase_: int ) -> jnp.ndarray:
def _force_token(lowerCamelCase_: Dict ):
UpperCAmelCase_ : List[Any] = scores.shape[0]
UpperCAmelCase_ : int = self.force_token_array[generation_idx]
UpperCAmelCase_ : List[Any] = jnp.ones_like(lowerCamelCase_ ,dtype=scores.dtype ) * -float("""inf""" )
UpperCAmelCase_ : str = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
UpperCAmelCase_ : List[Any] = lax.dynamic_update_slice(lowerCamelCase_ ,lowerCamelCase_ ,(0, current_token) )
return new_scores
UpperCAmelCase_ : int = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(lowerCamelCase_ ) ,lambda: scores ,) ,)
return scores
class _snake_case ( __snake_case ):
'''simple docstring'''
def __init__( self: List[str] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: List[str] ,lowerCamelCase_: int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = generate_config.eos_token_id
UpperCAmelCase_ : Dict = generate_config.no_timestamps_token_id
UpperCAmelCase_ : Union[str, Any] = generate_config.no_timestamps_token_id + 1
UpperCAmelCase_ : Dict = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase_ ,"""max_initial_timestamp_index""" ):
UpperCAmelCase_ : Optional[int] = generate_config.max_initial_timestamp_index
else:
UpperCAmelCase_ : Dict = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCAmelCase_ : Dict = model_config.vocab_size
def __call__( self: int ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCAmelCase_ : Any = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(lowerCamelCase_: List[str] ,lowerCamelCase_: Dict ):
UpperCAmelCase_ : Tuple = jnp.where((cur_len - self.begin_index) >= 1 ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Dict = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,lowerCamelCase_ ,)
UpperCAmelCase_ : Optional[int] = jnp.where((cur_len - self.begin_index) < 2 ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,lowerCamelCase_ ,lowerCamelCase_ ,)
return jnp.where(
lowerCamelCase_ ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) ,scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) ,) ,lowerCamelCase_ ,)
UpperCAmelCase_ : int = jax.vmap(lowerCamelCase_ )(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = jnp.where(cur_len == self.begin_index ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,lowerCamelCase_ ,)
UpperCAmelCase_ : Tuple = self.timestamp_begin + self.max_initial_timestamp_index
UpperCAmelCase_ : Union[str, Any] = jnp.where(
lowerCamelCase_ ,scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) ,lowerCamelCase_ ,)
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCAmelCase_ : str = jax.nn.log_softmax(lowerCamelCase_ ,axis=-1 )
def handle_cumulative_probs(lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Optional[int] ):
UpperCAmelCase_ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
UpperCAmelCase_ : Dict = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) ,lowerCamelCase_ ,)
UpperCAmelCase_ : Dict = jax.vmap(lowerCamelCase_ )(lowerCamelCase_ ,lowerCamelCase_ )
return scores
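# A minimal standalone sketch (not part of the module above, whose class names are
# obfuscated in this dump) of what the temperature and top-k warpers compute, using
# only plain JAX primitives:
import jax.numpy as jnp
from jax import lax
logits = jnp.array([[1.0, 3.0, 0.5, 2.0]])
warped = logits / 0.7 # temperature < 1 sharpens the distribution
top_vals, top_idx = lax.top_k(warped, 2) # keep the two highest-scoring tokens
filtered = jnp.full_like(warped, -jnp.inf).at[0, top_idx[0]].set(top_vals[0])
print(filtered) # finite scores survive only at indices 1 and 3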
| 322 |
from __future__ import annotations
def p_series ( nth_term : int | float | str , power : int | float | str ):
'''simple docstring'''
if nth_term == "":
return [""]
nth_term = int(nth_term )
power = int(power )
series : list[str] = []
for temp in range(int(nth_term ) ):
series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
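# Worked example: p_series(5, 2) returns ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'],
# i.e. the terms of 1 + 1/2^2 + 1/3^2 + 1/4^2 + 1/5^2.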
| 322 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
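# A hedged sketch of the same lazy-import idea via a PEP 562 module-level
# __getattr__ (a hypothetical simplification, not the actual _LazyModule code):
# import importlib
# def __getattr__(name):
#     for submodule, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module("." + submodule, __name__), name)
#     raise AttributeError(name)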
| 41 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_torch_available = True
except ImportError:
_torch_available = False
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.Tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
class Config :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(__lowerCamelCase , __lowerCamelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def is_remote_url ( url_or_filename ):
parsed = urlparse(url_or_filename )
return parsed.scheme in ("http", "https")
def hf_bucket_url ( model_id , filename , use_cdn=True ):
endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
legacy_format = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def url_to_filename ( url , etag=None ):
url_bytes = url.encode("""utf-8""" )
url_hash = shaaaa(url_bytes )
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("""utf-8""" )
etag_hash = shaaaa(etag_bytes )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
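# e.g. url_to_filename("https://cdn.huggingface.co/m.bin", etag='"abc"') yields
# "<sha256(url)>.<sha256(etag)>", so a new upstream ETag maps to a fresh cache file
# instead of clobbering the stale one; a ".h5" suffix is preserved as a format hint.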
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = req.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = requests.get(__lowerCamelCase )
__UpperCAmelCase : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
__UpperCAmelCase : int = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCamelCase )
with open(__lowerCamelCase , """rb""" ) as stream:
__UpperCAmelCase : List[str] = pkl.load(__lowerCamelCase )
__UpperCAmelCase : Dict = weights.pop("""model""" )
__UpperCAmelCase : Union[str, Any] = {}
for k, v in model.items():
__UpperCAmelCase : int = torch.from_numpy(__lowerCamelCase )
if "running_var" in k:
__UpperCAmelCase : Optional[int] = torch.tensor([0] )
__UpperCAmelCase : Tuple = k.replace("""running_var""" , """num_batches_tracked""" )
__UpperCAmelCase : Any = zero
return new
def lowerCamelCase__ ( ):
print(f"""{os.path.abspath(os.path.join(__lowerCamelCase , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]="RGB" ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
__UpperCAmelCase : List[str] = cva.imread(__lowerCamelCase )
else:
__UpperCAmelCase : int = get_image_from_url(__lowerCamelCase )
assert img is not None, f"""could not connect to: {im}"""
__UpperCAmelCase : Any = cva.cvtColor(__lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase : Optional[int] = img[:, :, ::-1]
return img
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int=1 ):
return (images[i : i + batch] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ))
| 63 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
LlamaTokenizerFast = None
_snake_case : Dict = {
"7B": 11_008,
"13B": 13_824,
"30B": 17_920,
"65B": 22_016,
"70B": 28_672,
}
NUM_SHARDS = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size ( n , ffn_dim_multiplier=1 , multiple_of=2_5_6 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
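# Worked example: n=4096, ffn_dim_multiplier=1, multiple_of=256 gives
# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 = 11008,
# matching the "7B" entry in the intermediate-size table above.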
def read_json ( path ):
with open(path , "r" ) as f:
return json.load(f )
def write_json ( text , path ):
with open(path , "w" ) as f:
json.dump(text , f )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=True ):
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__snake_case : Union[str, Any] = os.path.join(__lowerCamelCase , "tmp" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__snake_case : Union[str, Any] = read_json(os.path.join(__lowerCamelCase , "params.json" ) )
__snake_case : Any = NUM_SHARDS[model_size]
__snake_case : List[Any] = params["n_layers"]
__snake_case : Any = params["n_heads"]
__snake_case : Optional[Any] = n_heads // num_shards
__snake_case : Dict = params["dim"]
__snake_case : Dict = dim // n_heads
__snake_case : Any = 1_0_0_0_0.0
__snake_case : List[Any] = 1.0 / (base ** (torch.arange(0 , __lowerCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__snake_case : Tuple = params["n_kv_heads"] # for GQA / MQA
__snake_case : Optional[int] = n_heads_per_shard // num_key_value_heads
__snake_case : List[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
__snake_case : Optional[Any] = n_heads
__snake_case : Tuple = n_heads_per_shard
__snake_case : Optional[Any] = dim
# permute for sliced rotary
def permute(w , n_heads=n_heads , dima=dim , dimb=dim ):
return w.view(n_heads , dima // n_heads // 2 , 2 , dimb ).transpose(1 , 2 ).reshape(dima , dimb )
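# Worked example (hypothetical tiny shapes): with n_heads=1, head_dim=4, dim=6,
# view(1, 2, 2, 6).transpose(1, 2) reorders rows [0, 1, 2, 3] -> [0, 2, 1, 3],
# turning the interleaved rotary (real, imag) pairs of the original checkpoint
# into the half-split layout the Hugging Face rotary embedding expects.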
print(F'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__snake_case : Optional[Any] = torch.load(os.path.join(__lowerCamelCase , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
__snake_case : int = [
torch.load(os.path.join(__lowerCamelCase , F'consolidated.{i:02d}.pth' ) , map_location="cpu" )
for i in range(__lowerCamelCase )
]
__snake_case : Dict = 0
__snake_case : Optional[int] = {"weight_map": {}}
for layer_i in range(__lowerCamelCase ):
__snake_case : str = F'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
__snake_case : List[Any] = {
F'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wq.weight'] ),
F'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[F'layers.{layer_i}.attention.wk.weight'] ),
F'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[F'layers.{layer_i}.attention.wv.weight'],
F'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[F'layers.{layer_i}.attention.wo.weight'],
F'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w1.weight'],
F'model.layers.{layer_i}.mlp.down_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w2.weight'],
F'model.layers.{layer_i}.mlp.up_proj.weight': loaded[F'layers.{layer_i}.feed_forward.w3.weight'],
F'model.layers.{layer_i}.input_layernorm.weight': loaded[F'layers.{layer_i}.attention_norm.weight'],
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[F'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__snake_case : Tuple = {
F'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
F'layers.{layer_i}.attention_norm.weight'
].clone(),
F'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
F'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
__snake_case : Dict = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wq.weight'].view(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) )
__snake_case : List[Any] = permute(
torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wk.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
__snake_case : List[Any] = torch.cat(
[
loaded[i][F'layers.{layer_i}.attention.wv.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase )
__snake_case : Dict = torch.cat(
[loaded[i][F'layers.{layer_i}.attention.wo.weight'] for i in range(__lowerCamelCase )] , dim=1 )
__snake_case : List[Any] = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__lowerCamelCase )] , dim=0 )
__snake_case : Tuple = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__lowerCamelCase )] , dim=1 )
__snake_case : List[str] = torch.cat(
[loaded[i][F'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__lowerCamelCase )] , dim=0 )
__snake_case : Tuple = inv_freq
for k, v in state_dict.items():
__snake_case : Tuple = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
__snake_case : Optional[int] = F'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
__snake_case : Optional[Any] = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
__snake_case : Optional[int] = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(__lowerCamelCase )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(__lowerCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
__snake_case : str = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
# Write configs
__snake_case : str = {"total_size": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , "pytorch_model.bin.index.json" ) )
__snake_case : str = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
__snake_case : Dict = params["multiple_of"] if "multiple_of" in params else 2_5_6
__snake_case : List[str] = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
__snake_case : Union[str, Any] = LlamaForCausalLM.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(__lowerCamelCase , safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
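
# Example invocation (script name and paths are illustrative placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B \
#       --output_dir /path/to/hf-llama
# The converted directory can then be loaded with
# LlamaForCausalLM.from_pretrained("/path/to/hf-llama").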
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
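# For example, the mapping above turns the original CLAP key
# "text_branch.encoder.layer.0.mlp.fc1.weight" into
# "text_model.encoder.layer.0.intermediate.dense.weight"
# ("text_branch" -> "text_model", "mlp.fc1" -> "intermediate.dense").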
feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`: index 0 maps to linear1, the rest to linear2.
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split the fused qkv matrix into separate query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
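

# A small self-contained check of the qkv split above; the 8-dim size is an
# arbitrary assumption for illustration. A fused (3*d, d) tensor splits into
# three equal (d, d) blocks along dim 0: query, key and value.
def _demo_qkv_split():
    mixed = torch.randn(3 * 8, 8)
    d = mixed.size(0) // 3
    assert mixed[:d].shape == (8, 8)
    assert mixed[d : d * 2].shape == (8, 8)
    assert mixed[d * 2 :].shape == (8, 8)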
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
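# Example invocation (script name and paths are illustrative placeholders):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/clap_checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-hf --enable_fusion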
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = IFPipeline
lowerCamelCase :Optional[Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
lowerCamelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase :Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase ( self ) -> List[Any]:
return self._get_dummy_components()
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> str:
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ) -> List[str]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ) -> Tuple:
self._test_save_load_local()
def UpperCAmelCase ( self ) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Union[str, Any]:
# if
_A = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
_A = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_A , _A = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A = None
_A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_A = IFImgaImgPipeline(**pipe_a.components )
_A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A = IFInpaintingPipeline(**pipe_a.components )
_A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
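# Note: each follow-on pipeline above is built from the first pipeline's
# `.components` rather than via `from_pretrained`, so the already-loaded
# UNet and text-embedding weights are reused instead of re-downloaded,
# which is what keeps this multi-pipeline test within memory bounds.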
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , num_inference_steps=2 , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , num_inference_steps=2 , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , original_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase_ )
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , num_inference_steps=2 , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowerCAmelCase_ )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , original_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
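

# For reference, the complete measurement pattern used by the tests above,
# packaged as a single helper (a sketch; relies only on the standard
# torch.cuda counters and assumes a CUDA device):
def _peak_memory_bytes(fn):
    _start_torch_memory_measurement()  # reset the CUDA peak-memory counters
    fn()
    return torch.cuda.max_memory_allocated()  # high-water mark since the reset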
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ) -> Any:
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCAmelCase ( self ) -> int:
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase_ , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_A = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self ) -> str:
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
assert image.shape[0] == 2
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_A = unet.half()
_A = text_encoder.half()
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = torch.manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , ).images
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCAmelCase ( self ) -> List[str]:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , output_type="""np""" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
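

# For reference, the memory-saving combination exercised in the last test
# above, packaged for standalone use (a sketch; assumes a CUDA device and a
# PIL low-resolution input image, and uses the same public checkpoint):
def _low_memory_upscale_sketch(low_res_image, prompt="a cat sitting on a park bench"):
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)  # compute attention in slices to cut peak memory
    pipe.enable_sequential_cpu_offload()  # keep only the active submodule on the GPU
    return pipe(prompt=prompt, image=low_res_image).images[0]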
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
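

# Minimal usage sketch (the checkpoint name also appears elsewhere in this
# repo; the 48 kHz sampling rate is the usual CLAP setting and is an
# assumption here):
def _clap_processor_usage_sketch(audio_array):
    from transformers import ClapProcessor  # local import keeps the sketch self-contained
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    return processor(
        text=["a dog barking"], audios=audio_array, sampling_rate=48_000, return_tensors="pt"
    )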
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
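# Example invocation (script name and paths are illustrative placeholders):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300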