"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Optional[int] = """altclip_text_model"""
def __init__( self : str , UpperCamelCase__ : Any=2_5_0_0_0_2 , UpperCamelCase__ : Union[str, Any]=1_0_2_4 , UpperCamelCase__ : List[Any]=2_4 , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : int=4_0_9_6 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=5_1_4 , UpperCamelCase__ : int=1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Optional[Any]=1e-05 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Union[str, Any]=7_6_8 , **UpperCamelCase__ : List[Any] , )-> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: List[Any] = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[str] = intermediate_size
__lowerCAmelCase: Tuple = hidden_dropout_prob
__lowerCAmelCase: Dict = attention_probs_dropout_prob
__lowerCAmelCase: Union[str, Any] = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Tuple = initializer_range
__lowerCAmelCase: str = initializer_factor
__lowerCAmelCase: int = layer_norm_eps
__lowerCAmelCase: List[Any] = position_embedding_type
__lowerCAmelCase: List[str] = use_cache
__lowerCAmelCase: Dict = project_dim
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Dict = """altclip_vision_model"""
def __init__( self : Dict , UpperCamelCase__ : Dict=7_6_8 , UpperCamelCase__ : int=3_0_7_2 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : int=1_2 , UpperCamelCase__ : str=1_2 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Dict=2_2_4 , UpperCamelCase__ : Any=3_2 , UpperCamelCase__ : List[str]="quick_gelu" , UpperCamelCase__ : Optional[int]=1e-5 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1.0 , **UpperCamelCase__ : Optional[int] , )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Optional[Any] = projection_dim
__lowerCAmelCase: List[str] = num_hidden_layers
__lowerCAmelCase: Tuple = num_attention_heads
__lowerCAmelCase: Union[str, Any] = num_channels
__lowerCAmelCase: Optional[int] = patch_size
__lowerCAmelCase: str = image_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Tuple = initializer_factor
__lowerCAmelCase: List[str] = attention_dropout
__lowerCAmelCase: List[Any] = layer_norm_eps
__lowerCAmelCase: List[str] = hidden_act
@classmethod
def lowercase_ ( cls : int , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : Optional[int])-> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__)
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type") == "altclip":
__lowerCAmelCase: str = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__)
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = """altclip"""
SCREAMING_SNAKE_CASE_ : str = True
def __init__( self : Tuple , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict=7_6_8 , UpperCamelCase__ : Any=2.6592 , **UpperCamelCase__ : str)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: str = kwargs.pop("text_config_dict" , UpperCamelCase__)
__lowerCAmelCase: Dict = kwargs.pop("vision_config_dict" , UpperCamelCase__)
super().__init__(**UpperCamelCase__)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__lowerCAmelCase: List[Any] = {}
# This is the complete result when using `text_config_dict`.
__lowerCAmelCase: Tuple = AltCLIPTextConfig(**UpperCamelCase__).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__lowerCAmelCase: str = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__lowerCAmelCase: Dict = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(UpperCamelCase__)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
__lowerCAmelCase: str = {}
# This is the complete result when using `vision_config_dict`.
__lowerCAmelCase: Dict = AltCLIPVisionConfig(**UpperCamelCase__).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__lowerCAmelCase: int = {
str(UpperCamelCase__): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__lowerCAmelCase: int = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__lowerCAmelCase: Optional[Any] = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(UpperCamelCase__)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
__lowerCAmelCase: List[str] = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
if vision_config is None:
__lowerCAmelCase: Tuple = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
__lowerCAmelCase: str = AltCLIPTextConfig(**UpperCamelCase__)
__lowerCAmelCase: str = AltCLIPVisionConfig(**UpperCamelCase__)
__lowerCAmelCase: Any = projection_dim
__lowerCAmelCase: Any = logit_scale_init_value
__lowerCAmelCase: int = 1.0
@classmethod
def lowercase_ ( cls : Dict , UpperCamelCase__ : AltCLIPTextConfig , UpperCamelCase__ : AltCLIPVisionConfig , **UpperCamelCase__ : int)-> Tuple:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__)
def lowercase_ ( self : List[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Dict = copy.deepcopy(self.__dict__)
__lowerCAmelCase: List[str] = self.text_config.to_dict()
__lowerCAmelCase: str = self.vision_config.to_dict()
__lowerCAmelCase: Any = self.__class__.model_type
return output
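
A minimal usage sketch for the composite configuration defined above (the keyword values shown are the defaults):

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(hidden_size=1024, project_dim=768)
vision_config = AltCLIPVisionConfig(hidden_size=768)
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)          # 768
print(config.text_config.vocab_size)  # 250002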
"""simple docstring"""
from __future__ import annotations
def a__ ( __SCREAMING_SNAKE_CASE ) -> bool:
__lowerCAmelCase: Tuple = str(__SCREAMING_SNAKE_CASE )
return len(__SCREAMING_SNAKE_CASE ) == 9 and set(__SCREAMING_SNAKE_CASE ) == set("123456789" )
def a__ ( ) -> int | None:
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
__lowerCAmelCase: Tuple = 1_0_0_0_0_2 * base_num
if is_9_pandigital(__SCREAMING_SNAKE_CASE ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
__lowerCAmelCase: int = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(__SCREAMING_SNAKE_CASE ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
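
A quick sanity check for the helper, using the worked example from the problem statement (192 concatenated with 2 × 192 = 384 and 3 × 192 = 576 gives 192384576):

assert is_9_pandigital(192384576)   # the example concatenated product
assert not is_9_pandigital(123456780)  # contains a zero, so not 1-9 pandigital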
"""simple docstring"""
from math import ceil, sqrt
def a__ ( snake_case__ = 1_00_00_00 ) -> int:
lowerCamelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCamelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a ):
"""simple docstring"""
lowerCamelCase = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(_a ) != 0:
lowerCamelCase = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(_a ) != cols:
raise error
for value in row:
if not isinstance(_a , (int, float) ):
raise error
lowerCamelCase = rows
else:
lowerCamelCase = []
def _lowerCAmelCase ( self ):
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.rows )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.rows[0] )
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.order[0] == self.order[1]
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return bool(self.determinant() )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(_a ).determinant()
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(_a , _a )
return -1 * self.get_minor(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return Matrix(
[
[self.get_minor(_a , _a ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
"""simple docstring"""
return str(self.rows )
def __str__( self ):
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(_a ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(_a , _a ):
raise type_error
for value in row:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(_a )
else:
lowerCamelCase = self.rows[0:position] + [row] + self.rows[position:]
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(_a , _a ):
raise type_error
for value in column:
if not isinstance(_a , (int, float) ):
raise type_error
if len(_a ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
lowerCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowerCamelCase = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , _a ):
"""simple docstring"""
if not isinstance(_a , _a ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , _a ):
"""simple docstring"""
return not self == other
def __neg__( self ):
"""simple docstring"""
return self * -1
def __add__( self , _a ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , _a ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , _a ):
"""simple docstring"""
if isinstance(_a , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(_a , _a ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(_a , _a ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self , _a ):
"""simple docstring"""
if not isinstance(_a , _a ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
lowerCamelCase = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def _lowerCAmelCase ( cls , _a , _a ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(_a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
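
A short illustrative session with the class (values chosen for demonstration):

m = Matrix([[1, 2], [3, 4]])
print(m.order)          # (2, 2)
print(m.determinant())  # 1 * 4 - 2 * 3 = -2
print((m * m).rows)     # [[7, 10], [15, 22]]
print(m ** 2 == m * m)  # True: __pow__ is repeated multiplication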
"""Image/text processor class for OWL-ViT."""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
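
A usage sketch for the processor; google/owlvit-base-patch32 is a published OWL-ViT checkpoint, and the blank image is only a placeholder:

from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))
# One inner list of text queries per image in the batch; shorter lists are padded.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values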
"""XLM-RoBERTa configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""Tests for running PyTorch example scripts on TPU via xla_spawn."""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
"""Integration tests for the `datasets` inspection helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase=768 ) -> int:
super().__init__(lowercase )
lowerCAmelCase = proj_size
lowerCAmelCase = CLIPVisionModel(lowercase )
lowerCAmelCase = PaintByExampleMapper(lowercase )
lowerCAmelCase = nn.LayerNorm(config.hidden_size )
lowerCAmelCase = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowerCAmelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _snake_case ( self , lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = self.model(pixel_values=lowercase )
lowerCAmelCase = clip_output.pooler_output
lowerCAmelCase = self.mapper(latent_states[:, None] )
lowerCAmelCase = self.final_layer_norm(lowercase )
lowerCAmelCase = self.proj_out(lowercase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowercase ( nn.Module ):
def __init__( self , lowercase ) -> str:
super().__init__()
lowerCAmelCase = (config.num_hidden_layers + 1) // 5
lowerCAmelCase = config.hidden_size
lowerCAmelCase = 1
lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(lowercase , lowercase , lowercase , activation_fn="""gelu""" , attention_bias=lowercase )
for _ in range(lowercase )
] )
def _snake_case ( self , lowercase ) -> Optional[int]:
for block in self.blocks:
lowerCAmelCase = block(lowercase )
return hidden_states
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : List[str] ):
'''simple docstring'''
lowercase = ''
for i in table:
res += inp[i - 1]
return res
def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
'''simple docstring'''
return data[1:] + data[0]
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = ''
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Optional[Any] ):
'''simple docstring'''
lowercase = int('0b' + data[0] + data[-1] , 2 )
lowercase = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = message[:4]
lowercase = message[4:]
lowercase = apply_table(__snake_case , __snake_case )
lowercase = xor(__snake_case , __snake_case )
lowercase = apply_sbox(__snake_case , temp[:4] ) # noqa: E741
lowercase = apply_sbox(__snake_case , temp[4:] )
lowercase = '0' * (2 - len(__snake_case )) + l # noqa: E741
lowercase = '0' * (2 - len(__snake_case )) + r
lowercase = apply_table(l + r , __snake_case )
lowercase = xor(__snake_case , __snake_case )
return temp + right
if __name__ == "__main__":
_UpperCamelCase : Dict = input('Enter 10 bit key: ')
_UpperCamelCase : int = input('Enter 8 bit message: ')
_UpperCamelCase : Optional[int] = [6, 3, 7, 4, 8, 5, 1_0, 9]
_UpperCamelCase : Optional[Any] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
_UpperCamelCase : str = [2, 4, 3, 1]
_UpperCamelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
_UpperCamelCase : List[str] = [4, 1, 3, 5, 7, 2, 8, 6]
_UpperCamelCase : int = [4, 1, 2, 3, 2, 3, 4, 1]
_UpperCamelCase : Optional[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_UpperCamelCase : Optional[int] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_UpperCamelCase : str = apply_table(key, paa_table)
_UpperCamelCase : Union[str, Any] = temp[:5]
_UpperCamelCase : Dict = temp[5:]
_UpperCamelCase : List[Any] = left_shift(left)
_UpperCamelCase : str = left_shift(right)
_UpperCamelCase : int = apply_table(left + right, pa_table)
_UpperCamelCase : Optional[int] = left_shift(left)
_UpperCamelCase : Any = left_shift(right)
_UpperCamelCase : str = left_shift(left)
_UpperCamelCase : Tuple = left_shift(right)
_UpperCamelCase : Union[str, Any] = apply_table(left + right, pa_table)
# encryption
_UpperCamelCase : Tuple = apply_table(message, IP)
_UpperCamelCase : Any = function(expansion, sa, sa, keya, temp)
_UpperCamelCase : List[Any] = temp[4:] + temp[:4]
_UpperCamelCase : List[str] = function(expansion, sa, sa, keya, temp)
_UpperCamelCase : int = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
_UpperCamelCase : List[Any] = apply_table(CT, IP)
_UpperCamelCase : Optional[Any] = function(expansion, sa, sa, keya, temp)
_UpperCamelCase : int = temp[4:] + temp[:4]
_UpperCamelCase : str = function(expansion, sa, sa, keya, temp)
_UpperCamelCase : List[Any] = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0: List[str], token_ids_1: Optional[Any] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
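
A small usage sketch (assuming the published google/pegasus-xsum checkpoint is available):

from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS was pre-trained with gap sentence generation.").input_ids
# build_inputs_with_special_tokens appends only EOS, so the sequence ends with </s>.
assert ids[-1] == tokenizer.eos_token_id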
def kth_permutation(k, n):
    """
    Finds the k-th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
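
For example, the permutation at index 5 of the 24 permutations of 0..3:

print(kth_permutation(5, 4))  # [0, 3, 2, 1]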
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0 ):
lowercase_ : str = 0
lowercase_ : List[Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
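
The same value follows from the closed forms sum(i) = n(n + 1) / 2 and sum(i^2) = n(n + 1)(2n + 1) / 6, avoiding the loop entirely; a minimal sketch:

def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form(10) == 2640  # the worked example from the problem statement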
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so they need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
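
A sketch of how these helpers combine with a pipeline; KeyDataset lets a pipeline stream one field of a datasets dataset (the model and dataset names here are illustrative):

from datasets import load_dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

pipe = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
dataset = load_dataset("imdb", split="test")
# The pipeline wraps the KeyDataset in a DataLoader and the PipelineIterator above.
for out in pipe(KeyDataset(dataset, "text")):
    print(out)  # e.g. {'label': ..., 'score': ...}
    break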
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__ ( snake_case_ :Optional[int] ):
__UpperCAmelCase = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def lowercase__ ( snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def lowercase__ ( snake_case_ :Optional[int] ):
__UpperCAmelCase = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def lowercase__ ( ):
__UpperCAmelCase = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[int] , snake_case_ :Union[str, Any] , snake_case_ :Any ):
__UpperCAmelCase = '''imagenet-1k-id2label.json'''
__UpperCAmelCase = 1_000
__UpperCAmelCase = '''huggingface/label-files'''
__UpperCAmelCase = num_labels
__UpperCAmelCase = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type='''dataset''' ) ) , '''r''' ) )
__UpperCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()}
__UpperCAmelCase = idalabel
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
    __UpperCAmelCase = CvtConfig(num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__UpperCAmelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__UpperCAmelCase = [1, 4, 16]
    # For wide CvT (similar to wide ResNet) with depth size 24 (w24 = 2 + 2 + 20)
else:
__UpperCAmelCase = [2, 2, 20]
__UpperCAmelCase = [3, 12, 16]
__UpperCAmelCase = [192, 768, 1_024]
__UpperCAmelCase = CvtForImageClassification(snake_case_ )
__UpperCAmelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__UpperCAmelCase = image_size
__UpperCAmelCase = torch.load(snake_case_ , map_location=torch.device('''cpu''' ) )
__UpperCAmelCase = OrderedDict()
__UpperCAmelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__UpperCAmelCase = list_of_state_dict + cls_token(snake_case_ )
__UpperCAmelCase = list_of_state_dict + embeddings(snake_case_ )
for cnt in range(config.depth[idx] ):
__UpperCAmelCase = list_of_state_dict + attention(snake_case_ , snake_case_ )
__UpperCAmelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case_ )
for i in range(len(snake_case_ ) ):
__UpperCAmelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case_ )
model.save_pretrained(snake_case_ )
image_processor.save_pretrained(snake_case_ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_84,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowercase : List[Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
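# Quick reference for the branching above (a summary of this script's logic, not
# part of the original conversion code):
#   cvt-13  -> depth [1, 2, 10]   (13 = 1 + 2 + 10)
#   cvt-21  -> depth [1, 4, 16]   (21 = 1 + 4 + 16)
#   cvt-w24 -> depth [2, 2, 20]   (24 = 2 + 2 + 20), heads [3, 12, 16], dims [192, 768, 1024]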
| 332
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Dict = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "swinv2"
a__ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ):
super().__init__(**_lowercase )
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
__UpperCAmelCase = (0, 0, 0, 0)
| 332
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( snake_case_ : Union[str, Any]="" ) ->str:
'''simple docstring'''
__A : Tuple = tempfile.mkdtemp()
return os.path.join(snake_case_ ,str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__A : str = AgentAudio(__lowerCamelCase )
__A : Optional[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__lowerCamelCase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__lowerCamelCase ) )
# Ensure that the file contains the same value as the original tensor
__A , __A : Tuple = sf.read(__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , torch.tensor(__lowerCamelCase ) , atol=1e-4 ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5
__A : Optional[int] = get_new_path(suffix='''.wav''' )
sf.write(__lowerCamelCase , __lowerCamelCase , 1_6000 )
__A : Union[str, Any] = AgentAudio(__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , __lowerCamelCase )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = torch.randint(0 , 256 , (64, 64, 3) )
__A : List[str] = AgentImage(__lowerCamelCase )
__A : Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__lowerCamelCase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
__A : str = Image.open(__lowerCamelCase )
__A : List[Any] = AgentImage(__lowerCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
__A : Optional[int] = Image.open(__lowerCamelCase )
__A : Any = AgentImage(__lowerCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowerCamelCase ) )
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = '''Hey!'''
__A : Tuple = AgentText(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , agent_type.to_string() )
self.assertEqual(__lowerCamelCase , agent_type.to_raw() )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 291
|
"""simple docstring"""
import numpy as np
import qiskit
def __lowercase ( snake_case_ : int = 8 ,snake_case_ : int | None = None ) ->str:
'''simple docstring'''
__A : str = np.random.default_rng(seed=snake_case_ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
__A : str = 6 * key_len
# Measurement basis for Alice's qubits.
__A : Any = rng.integers(2 ,size=snake_case_ )
# The set of states Alice will prepare.
__A : Any = rng.integers(2 ,size=snake_case_ )
# Measurement basis for Bob's qubits.
__A : str = rng.integers(2 ,size=snake_case_ )
# Quantum Circuit to simulate BB84
__A : Dict = qiskit.QuantumCircuit(snake_case_ ,name='''BB84''' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(snake_case_ ):
if alice_state[index] == 1:
bbaa_circ.x(snake_case_ )
if alice_basis[index] == 1:
bbaa_circ.h(snake_case_ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(snake_case_ ):
if bob_basis[index] == 1:
bbaa_circ.h(snake_case_ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__A : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
__A : List[str] = qiskit.execute(snake_case_ ,snake_case_ ,shots=1 ,seed_simulator=snake_case_ )
# Returns the result of measurement.
__A : Union[str, Any] = job.result().get_counts(snake_case_ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__A : int = ''''''.join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
snake_case_ ,snake_case_ ,snake_case_ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__A : Union[str, Any] = gen_key[:key_len] if len(snake_case_ ) >= key_len else gen_key.ljust(snake_case_ ,'''0''' )
return key
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
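# A purely classical sketch of the sifting step performed above (toy data assumed,
# no qiskit needed): Alice and Bob keep a measurement only when their bases agree.
alice_basis_demo = [0, 1, 1, 0, 1]
bob_basis_demo = [0, 0, 1, 0, 1]
results_demo = "10110"
sifted_demo = "".join(
    bit for a, b, bit in zip(alice_basis_demo, bob_basis_demo, results_demo) if a == b
)
assert sifted_demo == "1110"  # bases agree at positions 0, 2, 3 and 4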
| 291
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Dict:
'''simple docstring'''
A__ = []
A__ = []
A__ = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
A__ = len(SCREAMING_SNAKE_CASE_ ) if (len(SCREAMING_SNAKE_CASE_ ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(SCREAMING_SNAKE_CASE_ ) , "Postfix".center(SCREAMING_SNAKE_CASE_ ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(SCREAMING_SNAKE_CASE_ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(SCREAMING_SNAKE_CASE_ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(SCREAMING_SNAKE_CASE_ ) == 0:
stack.append(SCREAMING_SNAKE_CASE_ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
            while len(SCREAMING_SNAKE_CASE_ ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:  # stop at "(" so its priority is never looked up
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(SCREAMING_SNAKE_CASE_ ) # push x to stack
print(
x.center(8 ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , sep=" | " , ) # Output in tabular format
while len(SCREAMING_SNAKE_CASE_ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , ("".join(SCREAMING_SNAKE_CASE_ )).ljust(SCREAMING_SNAKE_CASE_ ) , sep=" | " , ) # Output in tabular format
return "".join(SCREAMING_SNAKE_CASE_ ) # return Postfix as str
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> List[str]:
'''simple docstring'''
A__ = list(infix[::-1] ) # reverse the infix equation
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
if infix[i] == "(":
A__ = ")" # change "(" to ")"
elif infix[i] == ")":
A__ = "(" # change ")" to "("
return (infix_2_postfix("".join(SCREAMING_SNAKE_CASE_ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
lowerCAmelCase__ = input("""\nEnter an Infix Equation = """) # Input an Infix equation
lowerCAmelCase__ = """""".join(Infix.split()) # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 68
|
import numpy as np
from PIL import Image
def _a ( lowerCamelCase: np.ndarray , lowerCamelCase: int , lowerCamelCase: int ) -> np.ndarray:
'''simple docstring'''
__A = np.array(lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__A = 0
__A = 0
__A = 0
__A = 0
# compute the shape of the output matrix
__A = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__A = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__A = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A = 0
__A = 0
return updated_arr
def _a ( lowerCamelCase: np.ndarray , lowerCamelCase: int , lowerCamelCase: int ) -> np.ndarray:
'''simple docstring'''
__A = np.array(lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__A = 0
__A = 0
__A = 0
__A = 0
# compute the shape of the output matrix
__A = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__A = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__A = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A = 0
__A = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
snake_case__ : Optional[Any] = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
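# A compact, self-contained sketch of the same sliding-window max pooling logic
# (hypothetical helper, not part of the original module; reuses the numpy import above):
def maxpool2d_sketch(arr, size, stride):
    out_dim = (arr.shape[0] - size) // stride + 1
    out = np.zeros((out_dim, out_dim))
    for mat_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for mat_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            out[mat_i, mat_j] = arr[i : i + size, j : j + size].max()
    return out
# maxpool2d_sketch(np.arange(1, 17).reshape(4, 4), size=2, stride=2)
# -> [[ 6.,  8.],
#     [14., 16.]]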
| 117
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
# setable values
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
@classmethod
def A_ ( cls : List[str] , UpperCAmelCase : CommonSchedulerState , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray ) -> Union[str, Any]:
return cls(common=UpperCAmelCase , init_noise_sigma=UpperCAmelCase , timesteps=UpperCAmelCase )
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase ):
UpperCAmelCase__ = [e.name for e in FlaxKarrasDiffusionSchedulers]
UpperCAmelCase__ = 42
@property
def A_ ( self : List[str] ) -> Optional[Any]:
return True
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : float = 0.0_0_0_1 , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : str = "linear" , UpperCAmelCase : Optional[jnp.ndarray] = None , UpperCAmelCase : str = "fixed_small" , UpperCAmelCase : bool = True , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : jnp.dtype = jnp.floataa , ) -> Tuple:
lowerCamelCase__ : Optional[Any] = dtype
def A_ ( self : Optional[Any] , UpperCAmelCase : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
if common is None:
lowerCamelCase__ : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase__ : List[Any] = jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase__ : str = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCAmelCase , init_noise_sigma=UpperCAmelCase , timesteps=UpperCAmelCase , )
def A_ ( self : Dict , UpperCAmelCase : DDPMSchedulerState , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : Optional[int] = None ) -> jnp.ndarray:
return sample
def A_ ( self : int , UpperCAmelCase : DDPMSchedulerState , UpperCAmelCase : int , UpperCAmelCase : Tuple = () ) -> DDPMSchedulerState:
lowerCamelCase__ : List[str] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase__ : int = (jnp.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase , )
def A_ ( self : str , UpperCAmelCase : DDPMSchedulerState , UpperCAmelCase : Any , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[Any]=None ) -> Any:
lowerCamelCase__ : List[str] = state.common.alphas_cumprod[t]
lowerCamelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase__ : Dict = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase__ : Tuple = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase__ : Optional[int] = jnp.clip(UpperCAmelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase__ : List[str] = jnp.log(jnp.clip(UpperCAmelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase__ : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase__ : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase__ : str = variance
lowerCamelCase__ : Dict = state.common.betas[t]
lowerCamelCase__ : int = (predicted_variance + 1) / 2
lowerCamelCase__ : str = frac * max_log + (1 - frac) * min_log
return variance
def A_ ( self : Tuple , UpperCAmelCase : DDPMSchedulerState , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : Optional[jax.random.KeyArray] = None , UpperCAmelCase : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
lowerCamelCase__ : List[Any] = timestep
if key is None:
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = jnp.split(UpperCAmelCase , sample.shape[1] , axis=1 )
else:
lowerCamelCase__ : str = None
# 1. compute alphas, betas
lowerCamelCase__ : Tuple = state.common.alphas_cumprod[t]
lowerCamelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase__ : Union[str, Any] = 1 - alpha_prod_t
lowerCamelCase__ : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase__ : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase__ : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase__ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ' or `v_prediction` for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase__ : List[str] = jnp.clip(UpperCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ : Dict = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase__ : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase__ : int = jax.random.split(UpperCAmelCase , num=1 )
lowerCamelCase__ : Tuple = jax.random.normal(UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(UpperCAmelCase , UpperCAmelCase , predicted_variance=UpperCAmelCase ) ** 0.5) * noise
lowerCamelCase__ : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCAmelCase , state=UpperCAmelCase )
def A_ ( self : Any , UpperCAmelCase : DDPMSchedulerState , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
return add_noise_common(state.common , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : str , UpperCAmelCase : DDPMSchedulerState , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
return get_velocity_common(state.common , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __len__( self : Dict ) -> Any:
return self.config.num_train_timesteps
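# A minimal numeric check (assumed toy values, independent of the class above) of
# the "epsilon" branch: x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t)
# inverts the forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
_alpha_bar_t = 0.9  # hypothetical cumulative alpha at step t
_x0 = jnp.array([0.5, -0.5])  # "true" clean sample
_eps = jnp.array([0.1, 0.2])  # noise used in the forward process
_x_t = _alpha_bar_t**0.5 * _x0 + (1 - _alpha_bar_t) ** 0.5 * _eps
_x0_rec = (_x_t - (1 - _alpha_bar_t) ** 0.5 * _eps) / _alpha_bar_t**0.5
assert jnp.allclose(_x0_rec, _x0)  # exact when eps is the true noise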
| 45
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : int = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 1
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Any = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase , _lowercase : List[str] = emb.weight.shape
_lowercase : int = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
_lowercase : List[Any] = emb.weight.data
return lin_layer
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[Any] = torch.load(lowerCamelCase_ , map_location='cpu' )
_lowercase : Union[str, Any] = mam_aaa['args'] or mam_aaa['cfg']['model']
_lowercase : Any = mam_aaa['model']
remove_ignore_keys_(lowerCamelCase_ )
_lowercase : List[Any] = state_dict['encoder.embed_tokens.weight'].shape[0]
_lowercase : List[Any] = MaMaaaConfig(
vocab_size=lowerCamelCase_ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
_lowercase : Optional[Any] = state_dict['decoder.embed_tokens.weight']
_lowercase : Optional[int] = MaMaaaForConditionalGeneration(lowerCamelCase_ )
model.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
_lowercase : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
    SCREAMING_SNAKE_CASE : Tuple = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
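# Design note on make_linear_from_emb above: the LM head is tied to the decoder
# embedding matrix by reusing emb.weight.data as the Linear weight, so
# logits = hidden_states @ emb.weight.T without storing a second vocab-sized matrix.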
| 21
|
import doctest
from collections import deque
import numpy as np
class snake_case__ :
def __init__( self ) -> None:
__magic_name__ : Any = [2, 1, 2, -1]
__magic_name__ : Tuple = [1, 2, 3, 4]
def __magic_name__ ( self ) -> list[float]:
__magic_name__ : Optional[Any] = len(self.first_signal )
__magic_name__ : Dict = len(self.second_signal )
__magic_name__ : Tuple = max(lowerCAmelCase__ , lowerCAmelCase__ )
# create a zero matrix of max_length x max_length
__magic_name__ : Optional[int] = [[0] * max_length for i in range(lowerCAmelCase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCAmelCase__ ):
__magic_name__ : List[str] = deque(self.second_signal )
rotated_signal.rotate(lowerCAmelCase__ )
for j, item in enumerate(lowerCAmelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__magic_name__ : List[Any] = np.matmul(np.transpose(lowerCAmelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCAmelCase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
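# Hand check for the default signals above, x = [2, 1, 2, -1] and h = [1, 2, 3, 4]:
#   y[n] = sum_k x[k] * h[(n - k) mod 4]
#   y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10
#   y[1] = 2*2 + 1*1 + 2*4 + (-1)*3 = 10
#   y[2] = 2*3 + 1*2 + 2*1 + (-1)*4 = 6
#   y[3] = 2*4 + 1*3 + 2*2 + (-1)*1 = 14
# so the textbook circular convolution of these signals is [10.0, 10.0, 6.0, 14.0].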
| 342
| 0
|
import random
class __snake_case :
@staticmethod
def UpperCAmelCase__ ( snake_case__ ) -> tuple[list[int], list[int]]:
'''simple docstring'''
UpperCAmelCase : str =[ord(snake_case__ ) for i in text]
UpperCAmelCase : Optional[Any] =[]
UpperCAmelCase : Optional[Any] =[]
for i in plain:
UpperCAmelCase : List[str] =random.randint(1 , 300 )
UpperCAmelCase : Optional[int] =(i + k) * k
cipher.append(snake_case__ )
key.append(snake_case__ )
return cipher, key
@staticmethod
def UpperCAmelCase__ ( snake_case__ , snake_case__ ) -> str:
'''simple docstring'''
UpperCAmelCase : str =[]
for i in range(len(snake_case__ ) ):
UpperCAmelCase : Union[str, Any] =int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(snake_case__ ) )
return "".join(snake_case__ )
if __name__ == "__main__":
__snake_case , __snake_case = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
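# Round-trip check of the scheme above: for plaintext byte p and random key k,
#   cipher = (p + k) * k  =>  (cipher - k**2) / k = p
# e.g. p = ord("H") = 72 with k = 100 gives cipher = (72 + 100) * 100 = 17200,
# and (17200 - 100**2) / 100 = 72 -> "H".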
| 78
|
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> list[list[int]]:
'''simple docstring'''
UpperCAmelCase : list[list[int]] =[]
create_all_state(1 , __lowerCAmelCase , __lowerCAmelCase , [] , __lowerCAmelCase )
return result
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )-> None:
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__lowerCAmelCase , total_number - level + 2 ):
current_list.append(__lowerCAmelCase )
create_all_state(i + 1 , __lowerCAmelCase , level - 1 , __lowerCAmelCase , __lowerCAmelCase )
current_list.pop()
def lowerCAmelCase_ ( __lowerCAmelCase )-> None:
'''simple docstring'''
for i in total_list:
print(*__lowerCAmelCase )
if __name__ == "__main__":
__snake_case = 4
__snake_case = 2
__snake_case = generate_all_combinations(n, k)
print_all_state(total_list)
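# Expected output for n = 4, k = 2 above (all C(4, 2) = 6 combinations):
# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4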
| 78
| 1
|
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowerCAmelCase_ : int = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
assert base_extractor.is_extractable(__UpperCamelCase )
lowerCAmelCase_ : List[str] = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(__UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase_ : List[str] = file_path.read_text(encoding="utf-8" )
else:
lowerCAmelCase_ : Dict = output_path.read_text(encoding="utf-8" )
lowerCAmelCase_ : Tuple = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : Tuple = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
lowerCAmelCase_ : Tuple = input_paths[compression_format]
if input_path is None:
lowerCAmelCase_ : Optional[Any] = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
lowerCAmelCase_ : Dict = Extractor.infer_extractor_format(__UpperCamelCase )
assert extractor_format is not None
lowerCAmelCase_ : Optional[Any] = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase_ : List[Any] = file_path.read_text(encoding="utf-8" )
else:
lowerCAmelCase_ : Dict = output_path.read_text(encoding="utf-8" )
lowerCAmelCase_ : Tuple = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
"""simple docstring"""
import tarfile
lowerCAmelCase_ : List[str] = tmp_path / "data_dot_dot"
directory.mkdir()
lowerCAmelCase_ : str = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(__UpperCamelCase , "w" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def __lowerCamelCase ( __UpperCamelCase ) -> Dict:
"""simple docstring"""
import tarfile
lowerCAmelCase_ : List[str] = tmp_path / "data_sym_link"
directory.mkdir()
lowerCAmelCase_ : Optional[Any] = directory / "tar_file_with_sym_link.tar"
os.symlink(".." , directory / "subdir" , target_is_directory=__UpperCamelCase )
with tarfile.TarFile(__UpperCamelCase , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
lowerCAmelCase_ : Union[str, Any] = insecure_tar_files[insecure_tar_file]
lowerCAmelCase_ : Optional[int] = tmp_path / "extracted"
TarExtractor.extract(__UpperCamelCase , __UpperCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCamelCase ( __UpperCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : str = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
lowerCAmelCase_ : Optional[int] = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(__UpperCamelCase )
assert zipfile.is_zipfile(str(__UpperCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__UpperCamelCase ) # but we're right
| 241
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowercase__ = get_logger()
lowercase__ = None
class __lowerCamelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Optional[Any]=None , **a_ : Tuple ):
super().__init__(features=a_ )
import jax
from jaxlib.xla_client import Device
if isinstance(a_ , a_ ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
lowerCAmelCase_ : List[Any] = device if isinstance(a_ , a_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase_ : Dict = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
lowerCAmelCase_ : Tuple = str(jax.devices()[0] )
lowerCAmelCase_ : Dict = jnp_array_kwargs
@staticmethod
def lowerCamelCase ( ):
import jax
return {str(a_ ): device for device in jax.devices()}
def lowerCamelCase ( self : Tuple , a_ : Dict ):
import jax
import jax.numpy as jnp
if isinstance(a_ , a_ ) and column:
if all(
isinstance(a_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a_ , axis=0 )
return column
def lowerCamelCase ( self : Tuple , a_ : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(a_ , (str, bytes, type(a_ )) ):
return value
elif isinstance(a_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCAmelCase_ : Any = {}
if isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowerCAmelCase_ : List[Any] = {"dtype": jnp.intaa}
else:
lowerCAmelCase_ : Tuple = {"dtype": jnp.intaa}
elif isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowerCAmelCase_ : Optional[int] = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ : List[str] = np.asarray(a_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase_ : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a_ , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCamelCase ( self : Any , a_ : List[str] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a_ , "__array__" ) and not isinstance(a_ , jax.Array ):
lowerCAmelCase_ : str = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a_ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] )
elif isinstance(a_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] )
return self._tensorize(a_ )
def lowerCamelCase ( self : Dict , a_ : dict ):
return map_nested(self._recursive_tensorize , a_ , map_list=a_ )
def lowerCamelCase ( self : List[str] , a_ : pa.Table ):
lowerCAmelCase_ : Tuple = self.numpy_arrow_extractor().extract_row(a_ )
lowerCAmelCase_ : Union[str, Any] = self.python_features_decoder.decode_row(a_ )
return self.recursive_tensorize(a_ )
def lowerCamelCase ( self : Any , a_ : pa.Table ):
lowerCAmelCase_ : Dict = self.numpy_arrow_extractor().extract_column(a_ )
lowerCAmelCase_ : Optional[int] = self.python_features_decoder.decode_column(a_ , pa_table.column_names[0] )
lowerCAmelCase_ : List[str] = self.recursive_tensorize(a_ )
lowerCAmelCase_ : Optional[Any] = self._consolidate(a_ )
return column
def lowerCamelCase ( self : Tuple , a_ : pa.Table ):
lowerCAmelCase_ : Tuple = self.numpy_arrow_extractor().extract_batch(a_ )
lowerCAmelCase_ : Tuple = self.python_features_decoder.decode_batch(a_ )
lowerCAmelCase_ : List[str] = self.recursive_tensorize(a_ )
for column_name in batch:
lowerCAmelCase_ : Tuple = self._consolidate(batch[column_name] )
return batch
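# Illustration of the integer-precision branch above (behaviour depends on the
# jax_enable_x64 flag, which is off by default):
#   jnp.array([1, 2, 3]).dtype  -> int32 with x64 disabled, int64 with it enabled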
| 241
| 1
|
A : Dict = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : Optional[int] = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 371
|
from typing import List
from .keymap import KEYMAP, get_character
def __lowerCAmelCase ( a__ ) -> List[str]:
def decorator(a__ ):
__a = getattr(a__ , '''handle_key''' , [] )
handle += [key]
setattr(a__ , '''handle_key''' , a__ )
return func
return decorator
def __lowerCAmelCase ( *a__ ) -> str:
def decorator(a__ ):
__a = getattr(a__ , '''handle_key''' , [] )
handle += keys
setattr(a__ , '''handle_key''' , a__ )
return func
return decorator
class __A( a ):
def __new__( cls , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = super().__new__(cls , _snake_case , _snake_case , _snake_case )
if not hasattr(_snake_case , '''key_handler''' ):
setattr(_snake_case , '''key_handler''' , {} )
setattr(_snake_case , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
__a = getattr(_snake_case , '''handle_key''' , [] )
for key in handled_keys:
__a = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> List[str]:
'''simple docstring'''
__a = get_character()
if char != KEYMAP["undefined"]:
__a = ord(_snake_case )
__a = cls.key_handler.get(_snake_case )
if handler:
__a = char
return handler(cls )
else:
return None
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
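# Rough usage sketch (class and decorator names hypothetical): the decorators above
# attach a handle_key list to a method, and the metaclass collects those methods
# into key_handler so that handle_input() can dispatch on the pressed character.
# class Menu(metaclass=KeyHandler):
#     @mark("q")           # "mark" stands in for the single-key decorator above
#     def quit(cls):
#         return "quit"
# Menu.handle_input()      # returns "quit" when the user presses "q"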
| 33
| 0
|
"""simple docstring"""
from math import factorial
a = {str(d): factorial(d) for d in range(1_0)}
def lowercase (snake_case__ : int ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) )
def lowercase () -> int:
'''simple docstring'''
lowerCAmelCase = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , snake_case__ ) if sum_of_digit_factorial(snake_case__ ) == i )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 155
|
"""simple docstring"""
from itertools import permutations
def lowercase (snake_case__ : tuple ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(snake_case__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase (snake_case__ : int = 10 ) -> int:
'''simple docstring'''
return sum(
int("""""".join(map(snake_case__ , snake_case__ ) ) )
for num in permutations(range(snake_case__ ) )
if is_substring_divisible(snake_case__ ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 155
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( lowercase__ ):
A__ = 0
A__ = False
A__ = 3.0
class A__ ( unittest.TestCase ):
def A ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCamelCase ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCamelCase )
@require_multi_gpu
def A ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
lowerCamelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCamelCase : List[str] = torch.nn.Linear(1_0_0, 2_0_0)
lowerCamelCase : str = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCamelCase : str = ''
lowerCamelCase : List[Any] = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 358
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : Optional[int] = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
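# Usage sketch (assuming this file is a `transformers` model subpackage __init__.py):
# the submodules listed in `_import_structure` are only imported on first attribute
# access, so e.g. `from transformers.models.efficientnet import EfficientNetConfig`
# pays the torch/vision import cost lazily rather than at package import time.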
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
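# The integration tests above are decorated with `@slow`; in the transformers test
# suite such tests are usually opted into via an environment variable
# (illustrative invocation, path assumed):
#   RUN_SLOW=1 pytest tests/models/llama/test_modeling_llama.py -k LlamaIntegrationTest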
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
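# Typical invocations, assuming the standard `accelerate` CLI wiring where this
# parser is registered as the `env` subcommand (illustrative):
#   accelerate env
#   accelerate env --config_file path/to/config.yaml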
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
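# Usage sketch (assumes network access to the checkpoint named above):
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tokenizer("hello world")["input_ids"]  # token ids wrapped in [CLS] ... [SEP]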
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _SCREAMING_SNAKE_CASE ( __snake_case : str = "AAPL" ):
'''simple docstring'''
lowercase = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
lowercase = BeautifulSoup(requests.get(__snake_case ).text , 'html.parser' )
lowercase = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
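# In the question-answering example scripts this trainer is typically built with
# `eval_examples` and a `post_process_function` that converts start/end logits into
# text spans, so `compute_metrics` sees final answers rather than raw logits
# (sketch of the intended wiring; names assumed from run_qa.py).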
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))

    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
    doctest.testmod(name="malus_law")
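# Worked example (illustrative): light of intensity 100.0 through an analyser at
# 60 degrees; cos(60 degrees) ** 2 == 0.25, so the transmitted intensity is
#   malus_law(100.0, 60.0)  # -> ~25.0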
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored.")
if __name__ == "__main__":
main()
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
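# Usage sketch with illustrative values:
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value_array = np.array([[0, 1]])
#   similarity_search(dataset, value_array)  # -> [[[0, 0], 1.0]]
# ([0, 0] wins the tie with [1, 1] because only strictly smaller distances
# replace the current best.)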
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache: bool = False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """TensorFlow dataset that builds HANS features on the fly."""

        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache: bool = False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int32,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example.guid}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
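# Minimal usage sketch for the dataset wrapper above (the data path and tokenizer
# checkpoint are illustrative placeholders):
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = HansDataset("./hans_data", tokenizer, task="hans", max_seq_length=128)
#     print(len(dataset), dataset.get_labels())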
| 157
| 0
|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Gaussian Error Linear Unit. Original implementation of the gelu activation
    function from the Google BERT repo.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother (tanh-approximation) version of gelu."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Clip the possible GeLU outputs to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves and gate one with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
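# Usage sketch for the lookup helper above (input values are illustrative):
#     act = get_tf_activation("quick_gelu")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))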
| 45
|
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: the identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
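# Worked example for the ELU above (checked by hand: for x = -2 and alpha = 0.3,
# 0.3 * (exp(-2) - 1) is approximately -0.2594):
#     >>> exponential_linear_unit(np.array([2.3, 0.6, -2.0]), alpha=0.3)
#     array([ 2.3       ,  0.6       , -0.25939942])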
| 45
| 1
|
def selection_sort(collection):
    """Sorts a mutable ordered collection in place using selection sort."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 88
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
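# Hypothetical programmatic invocation of the converter above (all paths are
# placeholders, not real files):
#     convert_tf_checkpoint_to_pytorch(
#         task="WTQ",
#         reset_position_index_per_cell=True,
#         tf_checkpoint_path="tapas_wtq/model.ckpt",
#         tapas_config_file="tapas_wtq/tapas_config.json",
#         pytorch_dump_path="./tapas-wtq-pytorch",
#     )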
| 88
| 1
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the raw hidden states of the base transformer model."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
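# Minimal usage sketch via the pipeline factory (the model id is illustrative):
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test.")
#     print(len(features[0]), len(features[0][0]))  # num_tokens x hidden_size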
| 6
|
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn validation on the given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print validation messages and return whether the credit card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 194
| 0
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be abbreviated to string `b`."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
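# Worked examples for `abbr` above (checked by hand against the DP recurrence):
#     >>> abbr("daBcd", "ABC")
#     True
#     >>> abbr("dBcd", "ABC")
#     False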
| 80
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
A_ : int =logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
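# Usage sketch for composing a config (the model types are illustrative):
#     from transformers import AutoConfig
#     encoder = AutoConfig.for_model("vit")
#     decoder = AutoConfig.for_model("gpt2")
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#     assert config.decoder.add_cross_attention is True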
| 80
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
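# Usage sketch (the field override is illustrative):
#     config = VisualBertConfig(visual_embedding_dim=1024)
#     print(config.model_type, config.visual_embedding_dim)  # visual_bert 1024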
| 87
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 33
| 0
|
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring of `input_string`
    in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
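# Worked examples for `palindromic_string` above (verified by hand):
#     >>> palindromic_string("abbbaba")
#     'abbba'
#     >>> palindromic_string("ababa")
#     'ababa'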
| 196
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F"""{target} was {not_str}found in {sequence}""")
| 196
| 1
|
'''simple docstring'''
def factorial(num):
    """Find the factorial of a given number."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    """Split the digits of a number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100):
    """Returns the sum of the digits in the number num!"""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 341
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    """
    Image processor that resizes, center-crops, rescales and normalizes images,
    with common ImageNet-style defaults.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample=PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 341
| 1
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 361
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 106
| 0
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Returns the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 288
|
'''simple docstring'''
ROMAN = [
(10_00, '''M'''),
(9_00, '''CM'''),
(5_00, '''D'''),
(4_00, '''CD'''),
(1_00, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """Convert a roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
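# Round-trip examples for the converters above (worked by hand):
#     >>> int_to_roman(3456)
#     'MMMCDLVI'
#     >>> roman_to_int("MMMCDLVI")
#     3456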
| 112
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
# should succeed
        env = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
# should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
# should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '''
from transformers import pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
    def test_offline_model_dynamic_model(self):
        load = '''
from transformers import AutoModel
'''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
# should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 364
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize to a multiple of 32, scale to [-1, 1] and convert a PIL image to an NCHW tensor."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion pipeline for image super-resolution: a VQ-VAE, a U-Net and a scheduler."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
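# Minimal usage sketch (the checkpoint id and image path are illustrative; the
# pipeline needs real pretrained weights to run):
#     import PIL.Image
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("low_res.png").convert("RGB")
#     upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")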
| 306
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the next number in the chain: the sum of the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89
def chain(number: int) -> bool:
    """Follow the chain for `number`, caching the outcome for it and its multiples of 10."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` end their chain at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 133
| 0
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so the references below still resolve when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
], )
        outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
], threshold=0.0, )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
],
], )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
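# Minimal usage sketch of what these tests exercise end to end; the checkpoint
# and threshold below are illustrative choices taken from the tests above, not
# part of the test suite itself.
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])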
| 361
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects. idx_of_element maps each node to its position in
    the heap list so decrease_key can find it in O(1); heap_dict maps a node's
    name to its current value.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]] = idx
                self.idx_of_element[array[smallest]] = smallest
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] = p
            self.idx_of_element[self.heap[idx]] = idx
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] = 0
        self.idx_of_element[self.heap[-1]] = len(self.heap) - 1

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
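# Small follow-up sketch: repeated remove() yields nodes in ascending order of
# `val`, i.e. a heapsort over Node objects. The node names are illustrative.
nodes = [Node("u", 5), Node("v", 2), Node("w", 9)]
heap = MinHeap(nodes)
while not heap.is_empty():
    print(heap.remove())  # Node(v, 2), then Node(u, 5), then Node(w, 9)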
| 191
| 0
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
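# A sketch of how these coefficients are used in practice: EfficientNet's
# compound scaling multiplies channel counts by `width_coef` (rounded to a
# multiple of 8) and layer counts by `depth_coef`. The helpers below are
# hypothetical illustrations mirroring the reference implementation, not part
# of this conversion script.
import math as _math


def round_filters(filters: int, width_coef: float, divisor: int = 8) -> int:
    filters *= width_coef
    new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # keep rounding from dropping >10% of channels
        new_filters += divisor
    return int(new_filters)


def round_repeats(repeats: int, depth_coef: float) -> int:
    return int(_math.ceil(depth_coef * repeats))


# e.g. for b2 (width 1.1, depth 1.2): a 32-channel stem stays at 32,
# while a 3-layer block grows to 4 layers.
print(round_filters(32, 1.1), round_repeats(3, 1.2))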
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
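# Shape sanity check for the permutes above: TF stores conv kernels as
# (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W). The sizes
# here are illustrative only.
tf_kernel = np.zeros((3, 3, 32, 64), dtype=np.float32)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert tuple(pt_kernel.shape) == (64, 32, 3, 3)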
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Instantiate the original TF model with ImageNet weights
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
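# Hypothetical follow-up, assuming the script above was run with
# `--model_name b0 --pytorch_dump_folder_path hf_model --save_model`: the
# converted checkpoint then loads like any other local Hugging Face model.
from transformers import EfficientNetForImageClassification as ConvertedModel

converted = ConvertedModel.from_pretrained("hf_model")
converted_processor = EfficientNetImageProcessor.from_pretrained("hf_model")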
| 156
|
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits (1s) in a non-negative integer
    using Brian Kernighan's algorithm.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit, so we jump straight
        # to the next 1 instead of looping through all 32 bit positions.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
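# Quick cross-check against Python's built-in binary formatting: both count
# the set bits, but the loop above runs only once per 1-bit.
for n in (0, 1, 25, 37, 2**32 - 1):
    assert get_set_bits_count(n) == bin(n).count("1")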
| 156
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase =["""CLIPFeatureExtractor"""]
lowercase =["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
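# A stripped-down sketch of the idea behind `_LazyModule` (not the actual
# transformers implementation): attribute access triggers the real submodule
# import, so importing the package stays cheap.
import importlib
import types


class LazySubmodules(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # Import the owning submodule only when one of its names is first used.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)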
| 356
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 242
| 0
|
def merge_sort(collection: list) -> list:
    """
    Sorts a list by repeatedly pulling out the current minimum and maximum
    (two elements per pass) and stitching the pieces back together.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
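# Each pass extracts the current minimum and maximum, so the list shrinks by
# two per iteration: about n/2 passes, each with O(n) scans -> O(n^2) overall.
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []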
| 111
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 111
| 1
|
def method_1(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x <= (b - h):  # <= so the last interior point b - h is not skipped
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
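# With f(x) = x * x on [0, 1] the exact integral is 1/3; the composite
# trapezoidal rule above converges toward it as the step count grows.
for steps in (10.0, 100.0, 1000.0):
    print(steps, method_1([0.0, 1.0], steps))  # ~0.335, ~0.33335, ~0.3333335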
| 361
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, looking for the files identified
        with `identifier`, and executes the doc tests in those files.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
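# Minimal sketch of the `only_modules` branch above: DocTestSuite turns a
# module's docstring examples into unittest cases. The throwaway module here
# is purely illustrative.
import doctest
import types
import unittest

demo = types.ModuleType("demo")
demo.__doc__ = """
>>> 1 + 2
3
"""
suite = doctest.DocTestSuite(demo)
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0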
| 260
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another: completing
        # [1, 2] would count as fulfillment while generation might still be on its
        # way to [1, 2, 3, 4], which makes "constraint fulfilled" ambiguous.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
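# The behaviour these tests pin down, in miniature: a disjunctive constraint
# is fulfilled as soon as any one of its branches is generated in full.
constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = constraint.update(token)
assert constraint.completed  # the [1, 2, 4] branch finished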
| 88
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
        __magic_name__ = torch.nn.Conv2d(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
        __magic_name__ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
        __magic_name__ = nn.Conv2d(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__magic_name__ = x
__magic_name__ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(UpperCamelCase__ )
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ )
# post-process
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class Decoder(nn.Module):
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
        __magic_name__ = nn.Conv2d(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
        __magic_name__ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
        __magic_name__ = nn.Conv2d(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class VectorQuantizer(nn.Module):
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(UpperCamelCase__ )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(UpperCamelCase__ )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(UpperCamelCase__ )
if shape is not None:
__magic_name__ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution(object):
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = parameters
__magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
__magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
__magic_name__ = deterministic
__magic_name__ = torch.exp(0.5 * self.logvar )
__magic_name__ = torch.exp(self.logvar )
if self.deterministic:
__magic_name__ = __magic_name__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
__magic_name__ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__magic_name__ = self.mean + self.std * sample
return x
def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
__magic_name__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.mean
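# Compact sketch of the codebook lookup plus the straight-through trick used
# in VectorQuantizer.forward above; the codebook size and latent shapes here
# are illustrative only.
import torch
import torch.nn as nn

codebook = nn.Embedding(16, 4)                     # 16 codes of dimension 4
z = torch.randn(8, 4, requires_grad=True)          # flattened latents
indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)
# Forward pass uses the quantized values; gradients flow back to z as if the
# quantization step were the identity.
z_q = z + (z_q - z).detach()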
| 88
| 1
|
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum through an n x n matrix, moving only right
    and down, computed with dynamic programming.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
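# Worked example of the recurrence on a tiny grid: each cell adds its own cost
# to the cheaper of the cell above or the cell to the left.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
dp = [row[:] for row in grid]
for i in range(3):
    for j in range(3):
        if i == 0 and j == 0:
            continue
        up = dp[i - 1][j] if i > 0 else float("inf")
        left = dp[i][j - 1] if j > 0 else float("inf")
        dp[i][j] = grid[i][j] + min(up, left)
print(dp[-1][-1])  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1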
| 268
|
"""simple docstring"""
import os
def solution():
    """
    Finds the greatest product of four adjacent numbers (horizontally,
    vertically, or diagonally) in the 20x20 grid.
    """
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 268
| 1
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of
    the information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_fail = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 80
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_ ( a__ ):
__UpperCAmelCase = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , a = True , **a , ):
super().__init__(**a )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a , param_name="crop_size" )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ = do_convert_rgb
def __a ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def __a ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
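# Full preprocessing pipeline: resolve the per-call arguments against the stored
# defaults, validate the images, then apply convert-to-RGB, resize, center-crop,
# rescale and normalize before packing the result into a BatchFeature.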
def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a , param_name="size" , default_to_square=a )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(a , param_name="crop_size" , default_to_square=a )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 80
| 1
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
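# Map a single Flax parameter (key tuple + tensor) onto the PyTorch layout:
# 3D expert kernels are permuted per expert, 2D linear kernels are transposed,
# and "scale"/"embedding" entries are renamed to "weight".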
def __lowerCAmelCase ( a__ , a__ ) -> Dict:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = torch.permute(a__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a__ ):
# linear layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
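# Split a flattened checkpoint key into the real layer name, the tensorstore
# spec path ("metadata", "kvstore", ...) and the content to store under it.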
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
if "metadata" in layer:
__a = layer.split('''metadata''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__a = layer.split('''kvstore''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__a = layer.split('''/''' )
__a = '''/'''.join(split_layer[:-1] )
__a = (split_layer[-1],)
if "kvstore/path" in layer:
__a = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
__a = '''file'''
else:
__a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __lowerCAmelCase ( a__ , a__ ) -> Optional[int]:
__a = rename_keys(a__ )
__a = {}
for k, v in current_block.items():
__a = v
__a = new_current_block
torch.save(a__ , a__ )
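# Stream the T5X/tensorstore checkpoint layer by layer, rename the weights to
# the PyTorch convention, and flush a new shard to disk whenever the running
# shard would exceed `max_shard_size`; returns the shard index metadata.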
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ = WEIGHTS_NAME ) -> List[Any]:
__a = convert_file_size_to_int(a__ )
__a = []
__a = {}
__a = 0
__a = 0
os.makedirs(a__ , exist_ok=a__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__a = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__a = flatten_dict(a__ , sep='''/''' )
__a = {}
for layer in checkpoint_info.keys():
__a , __a , __a = get_key_and_tensorstore_dict(
a__ , a__ , a__ )
if curr_real_layer_name in all_layers:
__a = content
else:
__a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__a = torch.tensor(a__ )
__a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__a , __a = rename_base_flax_keys(tuple(key.split('''/''' ) ) , a__ )
__a = '''/'''.join(a__ )
# If adding this weight would tip the current shard over the maximal size, we split here.
if current_block_size + weight_size > max_shard_size:
__a = os.path.join(
a__ , weights_name.replace('''.bin''' , F"""-{len(a__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(a__ , a__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__a = {}
__a = 0
__a = raw_weights.to(getattr(a__ , a__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__a = os.path.join(a__ , weights_name.replace('''.bin''' , F"""-{len(a__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(a__ , a__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__a = {}
__a = {}
for idx, shard in enumerate(a__ ):
shard_file = weights_name.replace(
'''.bin''' , F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
__a = os.path.join(a__ , weights_name.replace('''.bin''' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(a__ , os.path.join(a__ , a__ ) )
__a = shard
for key in shard:
__a = shard_file
# Add the metadata
__a = {'''total_size''': total_size}
__a = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(a__ , a__ ) , '''w''' , encoding='''utf-8''' ) as f:
__a = json.dumps(a__ , indent=2 , sort_keys=a__ ) + '''\n'''
f.write(a__ )
return metadata, index
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A : Optional[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __lowerCAmelCase ( ) -> Optional[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__a = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__a = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__a = TaTokenizer.from_pretrained('''t5-small''' )
__a = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__a = tokenizer(a__ , return_tensors='''pt''' ).input_ids
__a = model.generate(a__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 361
|
import os
import numpy
import onnx
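# Compare two ONNX TensorProtos for equality while ignoring their names: the
# names are blanked out for the comparison and restored afterwards.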
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
name_a = a.name
name_b = b.name
a.name = ''''''
b.name = ''''''
res = a == b
a.name = name_a
b.name = name_b
return res
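# Replace one input name on a node in place; If/Loop nodes carry subgraphs in
# their attributes, so recurse into those as well.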
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(a__ , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
for n in graph_proto.node:
_node_replace_input_with(a__ , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Union[str, Any]:
__a = list(model.graph.initializer )
__a = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__a = inits[i].name
__a = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , a__ , a__ )
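# Entry point: load the ONNX model, find byte-identical initializers, drop the
# duplicates (reporting the memory saved), rewire every reference to the
# surviving copy, and save the result as `optimized_<model_file_name>`.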
def __lowerCAmelCase ( a__ ) -> str:
__a = os.path.dirname(a__ )
__a = os.path.basename(a__ )
__a = onnx.load(os.path.join(a__ , a__ ) )
__a = list(model.graph.initializer )
__a = set()
__a = {}
__a = []
__a = 0
for i in range(len(a__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(a__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(a__ )
dup_set.add(a__ )
__a = inits[j].data_type
__a = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , a__ )
total_reduced_size += mem_size
__a = inits[i].name
__a = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(a__ )
else:
__a = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
__a = sorted(a__ )
_remove_dup_initializers_from_model(a__ , a__ , a__ )
__a = '''optimized_''' + model_file_name
__a = os.path.join(a__ , a__ )
onnx.save(a__ , a__ )
return new_model
| 33
| 0
|
'''simple docstring'''
def pigeonhole_sort(a) -> None:
min_val = min(a) # min() finds the minimum value
max_val = max(a) # max() finds the maximum value
size = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
holes = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(x, int), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in sorted order.
i = 0
for count in range(size):
while holes[count] > 0:
holes[count] -= 1
a[i] = count + min_val
i += 1
def main() -> None:
a = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(a)
print('''Sorted order is:''' , ''' '''.join(str(n) for n in a) )
if __name__ == "__main__":
main()
| 304
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase):
UpperCAmelCase__ : Any = parent
def snake_case__ ( self):
return {}
def _UpperCamelCase ( ):
UpperCAmelCase__ : List[str] = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
UpperCAmelCase__ : Tuple = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = MarkupLMFeatureExtractionTester(self)
@property
def snake_case__ ( self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def snake_case__ ( self):
# Initialize feature_extractor
UpperCAmelCase__ : List[Any] = self.feature_extraction_class()
# Test not batched input
UpperCAmelCase__ : Optional[Any] = get_html_strings()[0]
UpperCAmelCase__ : Any = feature_extractor(_lowerCamelCase)
# fmt: off
UpperCAmelCase__ : Dict = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
UpperCAmelCase__ : List[str] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , _lowerCamelCase)
self.assertEqual(encoding.xpaths , _lowerCamelCase)
# Test batched
UpperCAmelCase__ : int = get_html_strings()
UpperCAmelCase__ : Optional[Any] = feature_extractor(_lowerCamelCase)
# fmt: off
UpperCAmelCase__ : List[str] = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
UpperCAmelCase__ : str = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes) , 2)
self.assertEqual(len(encoding.xpaths) , 2)
self.assertEqual(encoding.nodes , _lowerCamelCase)
self.assertEqual(encoding.xpaths , _lowerCamelCase)
| 163
| 0
|
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ = logging.getLogger(__name__)
class lowercase_ ( _a ):
'''simple docstring'''
__snake_case = """masked_bert"""
def __init__( self : Optional[int] , __UpperCAmelCase : List[Any]=30_522 , __UpperCAmelCase : List[Any]=768 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : int=12 , __UpperCAmelCase : str=3_072 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Any=512 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Tuple=1e-1_2 , __UpperCAmelCase : Any=0 , __UpperCAmelCase : List[str]="topK" , __UpperCAmelCase : Union[str, Any]="constant" , __UpperCAmelCase : List[Any]=0.0 , **__UpperCAmelCase : List[str] , ) ->List[str]:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = pruning_method
a = mask_init
a = mask_scale
| 370
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 26
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ ) -> None:
__UpperCAmelCase = value
__UpperCAmelCase = None
__UpperCAmelCase = None
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ ) -> None:
__UpperCAmelCase = tree
def lowerCAmelCase_ (self , lowercase__ ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__(self ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333
|
def __a ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
A_ : Union[str, Any] = generate_large_matrix()
A_ : Union[str, Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
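# Sanity check used by the tests: every row and every column of a grid must be
# sorted in non-increasing order.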
def __a ( SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
assert all(row == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for row in grid )
assert all(list(SCREAMING_SNAKE_CASE ) == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for col in zip(*SCREAMING_SNAKE_CASE ) )
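# Binary search for the index of the first negative value in a row sorted in
# non-increasing order, e.g. [4, 3, -1, -2] -> 2; returns the row length when
# the row holds no negatives.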
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase = (left + right) // 2
__UpperCAmelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase = mid + 1
else:
__UpperCAmelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(SCREAMING_SNAKE_CASE )
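# Count negatives by scanning rows top to bottom: because columns are sorted
# too, the negative boundary only moves left, so each row's search is
# restricted to the previous row's bound.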
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = len(grid[0] )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(SCREAMING_SNAKE_CASE ) * len(grid[0] )) - total
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
for row in grid:
for i, number in enumerate(SCREAMING_SNAKE_CASE ):
if number < 0:
total += len(SCREAMING_SNAKE_CASE ) - i
break
return total
def __a ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase = timeit(f'''{func}(grid=grid)''' , setup=SCREAMING_SNAKE_CASE , number=5_0_0 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 333
| 1
|
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
first_str_length = len(first_str)
second_str_length = len(second_str)
abs_length = (
first_str_length if first_str_length > second_str_length else second_str_length
)
output_list = []
for char_count in range(abs_length):
if char_count < first_str_length:
output_list.append(first_str[char_count])
if char_count < second_str_length:
output_list.append(second_str[char_count])
return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 354
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ : Union[str, Any] = get_tests_dir('''fixtures''')
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
# A mock response for an HTTP head request to emulate server down
snake_case_ : Any = mock.Mock()
snake_case_ : Tuple = 500
snake_case_ : Dict = {}
snake_case_ : Optional[Any] = HTTPError
snake_case_ : Optional[int] = {}
# Download this model to make sure it's in the cache.
snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowercase_ ) as mock_head:
snake_case_ : Any = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check ensures we did call the fake head request
mock_head.assert_called()
def _snake_case ( self : Optional[int] ):
# This test is for deprecated behavior and can be removed in v5
snake_case_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase):
@classmethod
def _snake_case ( cls : List[Any] ):
snake_case_ : Dict = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _snake_case ( cls : int ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def _snake_case ( self : Any ):
snake_case_ : str = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''test-feature-extractor''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _snake_case ( self : List[Any] ):
snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ : str = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _snake_case ( self : List[Any] ):
CustomFeatureExtractor.register_for_auto_class()
snake_case_ : int = CustomFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
snake_case_ : List[str] = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 155
| 0
|
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
a : List[Any] = logging.get_logger(__name__)
def __magic_name__ ( ) -> str:
'''simple docstring'''
snake_case_ = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case_ = json.loads(__UpperCAmelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case_ = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case_ = json.loads(__UpperCAmelCase )
if not mpi_options.get('''sagemaker_mpi_enabled''', __UpperCAmelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('''smdistributed''' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class a ( _lowerCamelCase ):
snake_case_ = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def A_ ( self : Optional[Any] ):
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' , lowercase_ , )
@cached_property
def A_ ( self : List[Any] ):
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
if self.no_cuda:
snake_case_ = torch.device('''cpu''' )
snake_case_ = 0
elif is_sagemaker_model_parallel_available():
snake_case_ = smp.local_rank()
snake_case_ = torch.device('''cuda''' , lowercase_ )
snake_case_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
snake_case_ = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
snake_case_ = torch.device('''cuda''' , self.local_rank )
snake_case_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
snake_case_ = torch.device('''cuda''' , self.local_rank )
snake_case_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase_ )
return device
@property
def A_ ( self : Optional[int] ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A_ ( self : Dict ):
return not is_sagemaker_model_parallel_available()
@property
def A_ ( self : Optional[Any] ):
return False
| 56
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_lowercase: Tuple = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def a( A : Optional[Any] ) -> str:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
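# Score a prediction against every acceptable ground truth and keep the best value.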
def a( A : Dict , A : List[Any] , A : str ) -> List[str]:
"""simple docstring"""
return max(metric_fn(A , A ) for gt in ground_truths )
def a( A : str , A : Optional[Any] , A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
a = [line.strip() for line in open(A , "r" ).readlines()]
a = []
if args.gold_data_mode == "qa":
a = pd.read_csv(A , sep="\t" , header=A )
for answer_list in data[1]:
a = ast.literal_eval(A )
answers.append(A )
else:
a = [line.strip() for line in open(A , "r" ).readlines()]
a = [[reference] for reference in references]
a = a = a = 0
for prediction, ground_truths in zip(A , A ):
total += 1
em += metric_max_over_ground_truths(A , A , A )
fa += metric_max_over_ground_truths(A , A , A )
a = 100.0 * em / total
a = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def a( A : Dict , A : str , A : List[str] ) -> List[Any]:
"""simple docstring"""
a = args.k
a = [line.strip() for line in open(A , "r" ).readlines()]
a = [line.strip() for line in open(A , "r" ).readlines()]
a = a = 0
for hypo, reference in zip(A , A ):
a = set(hypo.split("\t" )[:k] )
a = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
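# Retrieval evaluation step: encode a batch of questions, query the retriever,
# and return the tab-joined titles of the retrieved documents for precision@k.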
def a( A : Dict , A : Any , A : List[Any] ) -> Any:
"""simple docstring"""
def strip_title(A : Any ):
if title.startswith("\"" ):
a = title[1:]
if title.endswith("\"" ):
a = title[:-1]
return title
a = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
A , return_tensors="pt" , padding=A , truncation=A , )["input_ids"].to(args.device )
a = rag_model.rag.question_encoder(A )
a = question_enc_outputs[0]
a = rag_model.retriever(
A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
a = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a = []
for docs in all_docs:
a = [strip_title(A ) for title in docs["title"]]
provenance_strings.append("\t".join(A ) )
return provenance_strings
def a( A : Union[str, Any] , A : Optional[int] , A : Tuple ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
a = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
A , return_tensors="pt" , padding=A , truncation=A )
a = inputs_dict.input_ids.to(args.device )
a = inputs_dict.attention_mask.to(args.device )
a = rag_model.generate( # rag_model overwrites generate
A , attention_mask=A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a = rag_model.retriever.generator_tokenizer.batch_decode(A , skip_special_tokens=A )
if args.print_predictions:
for q, a in zip(A , A ):
logger.info("Q: {} - A: {}".format(A , A ) )
return answers
def a( ) -> Any:
"""simple docstring"""
a = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=A , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=A , choices=["exact", "compressed", "legacy"] , type=A , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=A , type=A , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=A , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=A , type=A , required=A , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=A , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=A , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=A , type=A , required=A , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=A , type=A , required=A , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=A , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=A , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=A , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=A , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=A , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=A , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
a = parser.parse_args()
a = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def a( A : Any ) -> Optional[Any]:
"""simple docstring"""
a = {}
if args.model_type is None:
a = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
a = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
a = args.n_docs
if args.index_name is not None:
a = args.index_name
if args.index_path is not None:
a = args.index_path
else:
a = BartForConditionalGeneration
a = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , A )
a = get_scores if args.eval_mode == "e2e" else get_precision_at_k
a = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(A , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(A ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
a = RagRetriever.from_pretrained(A , **A )
a = model_class.from_pretrained(A , retriever=A , **A )
model.retriever.init_retrieval()
else:
a = model_class.from_pretrained(A , **A )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
a = []
for line in tqdm(A ):
questions.append(line.strip() )
if len(A ) == args.eval_batch_size:
a = evaluate_batch_fn(A , A , A )
preds_file.write("\n".join(A ) + "\n" )
preds_file.flush()
a = []
if len(A ) > 0:
a = evaluate_batch_fn(A , A , A )
preds_file.write("\n".join(A ) )
preds_file.flush()
score_fn(A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_lowercase: Optional[int] = get_args()
main(args)
| 227
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
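# 2x2 base case of the Strassen recursion: plain schoolbook multiplication of
# two 2x2 matrices.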
def UpperCAmelCase ( a_ , a_ ) -> list:
"""simple docstring"""
if len(__lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(__lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception("""Matrices are not 2x2""" )
A_ : List[str] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowerCamelCase ) )
]
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__lowerCamelCase ) )
]
def UpperCAmelCase ( a_ ) -> tuple[list, list, list, list]:
"""simple docstring"""
if len(__lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("""Odd matrices are not supported!""" )
A_ : int = len(__lowerCamelCase )
A_ : Dict = matrix_length // 2
A_ : List[Any] = [[a[i][j] for j in range(__lowerCamelCase , __lowerCamelCase )] for i in range(__lowerCamelCase )]
A_ : Optional[int] = [
[a[i][j] for j in range(__lowerCamelCase , __lowerCamelCase )] for i in range(__lowerCamelCase , __lowerCamelCase )
]
A_ : int = [[a[i][j] for j in range(__lowerCamelCase )] for i in range(__lowerCamelCase )]
A_ : Optional[int] = [[a[i][j] for j in range(__lowerCamelCase )] for i in range(__lowerCamelCase , __lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase ( a_ ) -> tuple[int, int]:
"""simple docstring"""
return len(__lowerCamelCase ), len(matrix[0] )
def UpperCAmelCase ( a_ ) -> None:
"""simple docstring"""
print("""\n""".join(str(__lowerCamelCase ) for line in matrix ) )
def UpperCAmelCase ( a_ , a_ ) -> list:
"""simple docstring"""
if matrix_dimensions(__lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(__lowerCamelCase , __lowerCamelCase )
A_ , A_ , A_ , A_ : Tuple = split_matrix(__lowerCamelCase )
A_ , A_ , A_ , A_ : str = split_matrix(__lowerCamelCase )
A_ : List[Any] = actual_strassen(__lowerCamelCase , matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) )
A_ : Tuple = actual_strassen(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
A_ : Optional[Any] = actual_strassen(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
A_ : int = actual_strassen(__lowerCamelCase , matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) )
A_ : Optional[Any] = actual_strassen(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , matrix_addition(__lowerCamelCase , __lowerCamelCase ) )
A_ : Union[str, Any] = actual_strassen(matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) , matrix_addition(__lowerCamelCase , __lowerCamelCase ) )
A_ : List[Any] = actual_strassen(matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) , matrix_addition(__lowerCamelCase , __lowerCamelCase ) )
A_ : str = matrix_addition(matrix_subtraction(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) , __lowerCamelCase )
A_ : List[str] = matrix_addition(__lowerCamelCase , __lowerCamelCase )
A_ : List[Any] = matrix_addition(__lowerCamelCase , __lowerCamelCase )
A_ : Dict = matrix_subtraction(matrix_subtraction(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) , __lowerCamelCase )
# construct the new matrix from our 4 quadrants
A_ : Optional[Any] = []
for i in range(len(__lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
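# Public wrapper: validate that the inner dimensions match, zero-pad both
# matrices up to a square power-of-two size so the quadrant splits stay even,
# run the recursion, then strip the padding from the result.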
def UpperCAmelCase ( a_ , a_ ) -> list:
"""simple docstring"""
if matrix_dimensions(__lowerCamelCase )[1] != matrix_dimensions(__lowerCamelCase )[0]:
A_ : List[Any] = (
"""Unable to multiply these matrices, please check the dimensions.\n"""
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(__lowerCamelCase )
A_ : str = matrix_dimensions(__lowerCamelCase )
A_ : List[str] = matrix_dimensions(__lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
A_ : str = max(*__lowerCamelCase , *__lowerCamelCase )
A_ : Dict = int(math.pow(2 , math.ceil(math.loga(__lowerCamelCase ) ) ) )
A_ : List[Any] = matrixa
A_ : List[str] = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , __lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
A_ : Optional[Any] = actual_strassen(__lowerCamelCase , __lowerCamelCase )
# Removing the additional zeros
for i in range(0 , __lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCamelCase__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCamelCase__ : str = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 368
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
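# Collect the (row_id, row_dict) pairs the Spark iterable is expected to yield
# when its partitions are visited in `partition_order`.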
def UpperCAmelCase ( a_ , a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : Dict = []
for part_id in partition_order:
A_ : List[str] = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(a_ ):
expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
A_ : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A_ : Optional[int] = spark.range(1_0_0 ).repartition(1 )
A_ : Optional[Any] = Spark(a_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A_ : str = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A_ : List[str] = spark.range(1_0 ).repartition(2 )
A_ : List[str] = [1, 0]
A_ : List[Any] = _generate_iterable_examples(a_ , a_ ) # Reverse the partitions.
A_ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(a_ , a_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A_ , A_ : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
A_ : List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A_ : Dict = spark.range(1_0 ).repartition(1 )
A_ : int = SparkExamplesIterable(a_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(a_ ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
A_ : Dict = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A_ : Union[str, Any] = spark.range(3_0 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A_ : Optional[int] = lambda a_ : x.reverse()
A_ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(a_ , [2, 1, 0] )
A_ : Any = SparkExamplesIterable(a_ ).shuffle_data_sources(a_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(a_ ):
A_ , A_ : Any = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
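# Note on the split above (comment only): shard_data_sources appears to assign
# partitions round-robin, so worker w of n workers reads partitions w, w + n,
# w + 2n, ...; with 4 partitions and 2 workers that is exactly the [0, 2] / [1, 3]
# split asserted in this test.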
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
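# With max_shard_size=1, every 8-byte row already exceeds the shard budget, so the
# repartitioning is capped at one row per partition: 100 rows -> 100 partitions.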
| 164
| 0
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
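    # Note on the expected string above: encode() wraps the text in [CLS]/[SEP], and
    # sentencepiece decoding emits no space before special tokens such as [MASK],
    # hence "the[MASK]." in the round-tripped output.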
@slow
def snake_case_ ( self : Any ):
# fmt: off
__lowercase : Any = {'''input_ids''': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 156
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the lyrics vocabulary had n_vocab=80 characters; v3 dropped "+", leaving n_vocab=79,
        # so for v3 vocabularies "+" must also be treated as out of vocabulary.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text: str) -> str:
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
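    # Illustrative walk-through of _normalize (comment only, not called anywhere):
    # "Alan Jackson!!" -> lowercase -> "alan jackson!!" -> characters outside
    # [a-z0-9.] become "_" -> "alan_jackson__" -> runs of "_" collapse and leading/
    # trailing "_" are stripped -> "alan_jackson".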
    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
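    # Note (an assumption based on the -INFINITY fill above, not verified against the
    # model code): unlike most tokenizers, "attention_masks" here is a list of -inf
    # values, one per lyric token, i.e. an additive-style mask rather than a 0/1 one.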
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 156
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
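# In short (a sketch of the idea, not part of the original example): with local SGD,
# each worker takes `local_sgd_steps` optimizer steps on its own batches and only
# then averages parameters across workers, instead of all-reducing gradients on
# every step; the `local_sgd.step()` call inside the training loop below counts
# local steps and triggers that synchronization.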
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
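# Quick illustration of the collate padding above (comment only): with
# pad_to_multiple_of=8, a longest-in-batch sequence of 27 tokens is padded to 32,
# which keeps tensor shapes friendly for fp16/bf16 tensor cores.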
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 27
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
                 use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
                 causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
                 initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True,
                 scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
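    # Background for the shape checks above: XLMForQuestionAnswering uses a
    # SQuAD-style beam head that scores end_n_top end positions for each of the
    # start_n_top start candidates, hence the start_n_top * end_n_top width of the
    # end_top_* tensors.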
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 27
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp(self):
'''simple docstring'''
super().setUp()
# fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagoftoken(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
@slow
    def test_prefix_input_token(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
@slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
# tokenizer has no padding token
pass
| 134
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Scrape the citation text of the first result from Google Scholar."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
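# Assumption behind anchors[2] above: in Google Scholar's "gs_fl" result footer the
# third link is the "Cited by N" anchor, so the returned text is the citation count.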
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 134
| 1
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
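# Worked example: arc_length(90, 10) == 2 * pi * 10 * (90 / 360) == 5 * pi ≈ 15.71.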
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 371
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    '''simple docstring'''

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
    fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_args_convert(self):
        '''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 193
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns is not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the quantized UNet produced by Intel Neural Compressor, if present.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 73
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
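
# Note: the circuit above applies no gates, so the qubit stays in |0> and all
# 1000 shots should report the bitstring "0", i.e. counts of {"0": 1000}.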
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73
| 1
|
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    '''simple docstring'''
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 358
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
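
# Illustrative shapes of the outputs above (assuming single-id sequences a and b):
#   build_inputs_with_special_tokens([a], [b])      -> [CLS, a, SEP, b, SEP]
#   create_token_type_ids_from_sequences([a], [b])  -> [0, 0, 0, 1, 1]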
| 219
| 0
|
class Node:
    def __init__(self, name, val):
        '''simple docstring'''
        self.name = name
        self.val = val

    def __str__(self):
        '''simple docstring'''
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        '''simple docstring'''
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        '''simple docstring'''
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        '''simple docstring'''
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        # heapify from the last parent down to the root
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)

print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, '''create_state''') and getattr(scheduler, '''has_state''', False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('''.''')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
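
# Illustrative use of the cosine schedule above: betas_for_alpha_bar(1000) yields a
# length-1000 jnp array of monotonically increasing betas, each capped at max_beta=0.999.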
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}'
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
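
# Minimal usage sketch (assumes a Flax scheduler exposing `config` and `dtype`):
#   state = CommonSchedulerState.create(scheduler)
#   noisy = add_noise_common(state, original_samples, noise, timesteps)
#   velocity = get_velocity_common(state, sample, noise, timesteps)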
| 106
| 0
|
"""simple docstring"""
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"""layers_{str(layer_index)}"""

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (param paths follow the Flax T5 encoder block layout)
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block[0][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block[0][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block[0][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block[0][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block[0]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block[0][encoder_attn_name]["global_input_layer_norm"]["weight"] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block[1]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block[1]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block[1]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block[1]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block[1]["layer_norm"]["weight"] = t5x_mlp_layer_norm
        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"][0][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"][0][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"""layers_{str(layer_index)}"""

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (param paths follow the Flax T5 decoder block layout)
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block[0]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block[0]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block[0]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block[0]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
        flax_model_decoder_layer_block[0]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block[1]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block[1]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block[1]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block[1]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block[1]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block[2]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block[2]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block[2]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block[2]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block[2]["layer_norm"]["weight"] = t5x_mlp_layer_norm
        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"][0]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 352
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """simple docstring"""
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size['''height'''], size['''width'''])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, '''pytesseract''')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
| 15
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 227
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 33
| 0
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
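
# Illustrative examples: mode([2, 2, 3]) -> [2]; ties all come back sorted: mode([1, 2]) -> [1, 2]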
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split('''.''') if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
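
# Illustrative examples: is_ip_v4_address_valid("192.168.0.23") -> True,
# is_ip_v4_address_valid("1.2.33333333.4") -> False (octet out of range).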
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = '''valid''' if is_ip_v4_address_valid(ip) else '''invalid'''
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 167
| 0
|
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
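
# Illustrative example: solution(3) == 12, since F(12) = 144 is the first
# Fibonacci number with three digits.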
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 86
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("""imagenet1k_"""):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_"""):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_"""):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_"""):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, """model.classification.name""", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, """model.classification.mitv2.width_multiplier""", 1.0)
    assert (
        getattr(orig_config, """model.classification.mitv2.attn_norm_layer""", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, """model.classification.activation.name""", """swish""")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, """model.segmentation.output_stride""", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, """model.segmentation.deeplabv3.aspp_rates""", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, """model.segmentation.deeplabv3.aspp_out_channels""", 512)
            config.aspp_dropout_prob = getattr(orig_config, """model.segmentation.deeplabv3.aspp_dropout""", 0.1)

    # id2label
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(""".block.""", """.""")
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""", """.convolution.""")
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""", """.normalization.""")

        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""", f'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""", """.expand_1x1.""")
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""", """.reduce_1x1.""")
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
            if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                k_new = k_new.replace(
                    f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''')

            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""", """layernorm_before.""")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""", """attention.""")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""", """layernorm_after.""")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""", """ffn.conv1.""")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""", """ffn.conv2.""")

        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""", """classifier.""")

        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""", """segmentation_head.""")
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""", """.""")
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""", """.""")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head."""):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")

    # load huggingface model
    if task_name.startswith("""ade20k_""") or task_name.startswith("""voc_"""):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("""imagenet"""):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("""Predicted class:""", model.config.id2label[predicted_class_idx])
        if task_name.startswith("""imagenet1k_256""") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 26
| 0
|
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    """simple docstring"""
    return base64.a85encode(string.encode("""utf-8"""))


def base85_decode(a85encoded: bytes) -> str:
    """simple docstring"""
    return base64.a85decode(a85encoded).decode("""utf-8""")
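
# Round-trip sanity check: base85_decode(base85_encode("some text")) == "some text"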
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """simple docstring"""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """simple docstring"""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 164
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["""audio_values""", """audio_mask"""]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features(self, waveform):
        """simple docstring"""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,  # the last two parameter names are assumed; only the first four are used below
        mask_audio=False,
        **kwargs,
    ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 92
|
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    '''simple docstring'''
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
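
# Illustrative example: cramers_rule_2x2([2, 3, 0], [5, 1, 0]) -> (0.0, 0.0)
# (non-zero determinant with zero determinant_x/determinant_y gives the trivial solution).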
| 155
| 0
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
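
# Sanity check: 220 and 284 are the classic amicable pair, so each number's
# proper-divisor sum equals the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220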
| 107
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists.\n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
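
# Hedged smoke test (kept commented because it assumes the sibling
# rabin_miller/cryptomath modules are importable). By construction the private
# exponent d and e_2 must satisfy (e_1 ** d) * e_2 == 1 (mod p):
#
#   public_key, private_key = generate_key(16)  # tiny key size, demo only
#   _, e_1, e_2, p = public_key
#   _, d = private_key
#   assert pow(e_1, d, p) * e_2 % p == 1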
| 107
| 1
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the payloads rather than relinking the nodes.
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
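    # Swapping with a value that is not present is a no-op by design
    # (swap_nodes returns early when either node is None):
    ll.swap_nodes(1, 99)
    ll.print_list()  # unchanged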
| 236
|
def bfs(graph, source, sink, parent):
    # Return True if the sink is reachable from the source in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges and reverse edges on the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
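
# Note that ford_fulkerson mutates `graph` in place, turning it into the
# residual network; a second run on the same matrix therefore finds no
# augmenting path and returns 0:
assert ford_fulkerson(graph, source, sink) == 0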
| 236
| 1
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a pyspark.sql.DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
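
# Hedged usage sketch (kept commented: it assumes an active SparkSession named
# `spark`, which is not part of the file above):
#
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()
#   print(ds)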
| 357
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 178
| 0
|
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module objects can contain other patched module objects."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
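
# Hedged usage sketch (kept commented; all names below are illustrative and
# not part of the file above): temporarily swap os.path.join inside a
# stand-in module-like object for the duration of a `with` block.
#
#   import os
#   class _module_like:
#       os = os  # the global whose submodule attribute we want patched
#   with patch_submodule(_module_like, "os.path.join", lambda *p: "/".join(p)):
#       assert _module_like.os.path.join("a", "b") == "a/b"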
| 27
|
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
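
# Pattern for registering a patcher for another metric (kept commented; the
# metric and backend names here are hypothetical): decorate a generator that
# patches the expensive call and then yields.
#
#   @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#   def patch_my_metric(module_name):
#       with patch("my_metric_backend.expensive_forward") as mock_forward:
#           mock_forward.return_value = 0.5
#           yield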
| 27
| 1
|
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place using pigeonhole sort."""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
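    # Pigeonhole sort runs in O(n + value_range) time, so it is only sensible
    # when the value range is small relative to n; quick self-check:
    assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]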
| 359
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Sanity checks on the training arguments."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help=("For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html"))
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
    main()
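
# Illustrative invocation (file paths and loss weights are placeholders; the
# distillation README describes the real data-preparation steps):
#
#   python train.py --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 2.0 --alpha_ce 5.0 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle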
| 175
| 0
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 284
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
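
# Hedged usage sketch (kept commented; downloads the tokenizer files on first
# run):
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tok("Hello world!")
#   print(enc["input_ids"], enc["attention_mask"])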
| 193
| 0
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local")
    parser.add_argument("--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
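    # Illustrative invocation (bucket paths are placeholders; the .tfrecord
    # shards come from prepare_tfrecord_shards.py):
    #
    #   python run_mlm.py --train_dataset gs://my-bucket/train/ \
    #       --eval_dataset gs://my-bucket/eval/ \
    #       --tokenizer unigram-tokenizer-wikitext \
    #       --pretrained_model_config roberta-base \
    #       --output_dir trained_model --bfloat16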
| 59
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
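
# A triangle is the smallest odd cycle, hence not 2-colorable:
assert check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) is False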
| 59
| 1
|
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 207
|
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
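    # The sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so the 10th
    # ugly number is 12:
    assert ugly_numbers(10) == 12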
| 207
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def __lowerCAmelCase (_UpperCamelCase ):
for pegasus_name, hf_name in PATTERNS:
__lowerCAmelCase : List[Any] = k.replace(_UpperCamelCase , _UpperCamelCase )
return k
def convert_pegasus(tf_weights , cfg_updates ):
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], F"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000" ):
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path , save_dir ):
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"summarization_{dataset}"]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
    parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("""pegasus""", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
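# Hypothetical invocation (the checkpoint path is illustrative, not taken from this file):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 save/aeslc
# When save_dir is omitted, the converted model lands under pegasus/<dataset>.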
| 355
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int ) -> dict:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
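# Sanity check (assuming qiskit with the Aer provider is installed): no gates are
# applied before measurement, so the qubit stays in |0> and the returned histogram
# should read {'0': 1000} across all 1000 shots.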
| 182
| 0
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        """simple docstring"""
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> None:
        """simple docstring"""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '[MASK]' )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size( self ) -> None:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ) -> None:
        """simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer( self ):
        """simple docstring"""
        return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
    def test_tokenization_base_easy_symbols( self ) -> None:
        """simple docstring"""
        txt = 'Hello World!'
        ids = [65, 18536, 2260, 101, 66]
        self.assertListEqual(ids , self.big_tokenizer.encode(txt ) )
@slow
    def test_tokenization_base_hard_symbols( self ) -> None:
        """simple docstring"""
        txt = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        # fmt: off
        ids = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(ids , self.big_tokenizer.encode(txt ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ) -> None:
        """simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BigBirdConfig(attention_type='original_full' )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens( self ) -> None:
        """simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
        decoded_text = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
        self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
    def test_tokenizer_integration( self ) -> None:
        """simple docstring"""
_UpperCAmelCase = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
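    # Hedged note: following the transformers test conventions, the @slow-marked
    # integration tests above are skipped unless RUN_SLOW=1 is set in the environment.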
| 329
|
def longest_distance(graph ) -> None:
    """Print the vertex count of the longest path in a DAG, via Kahn's topological sort."""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
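# Worked example: for the adjacency list above this prints 5, the vertex count of a
# longest path such as 0 -> 2 -> 5 -> 6 -> 7 (long_dist starts at 1 per vertex, so
# distances count vertices rather than edges).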
| 15
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline ( ChunkPipeline ):
def __init__( self , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        '''Split user kwargs into preprocess, forward and postprocess parameters.'''
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        '''simple docstring'''
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        '''simple docstring'''
        image = load_image(image )
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors='''pt''' )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
                    model_inputs['''image_embeddings'''] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''' )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        '''simple docstring'''
        input_boxes = model_inputs.pop('''input_boxes''' )
        is_last = model_inputs.pop('''is_last''' )
        original_sizes = model_inputs.pop('''original_sizes''' ).tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['''iou_scores''']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        '''simple docstring'''
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores''' ) )
            all_masks.extend(model_output.pop('''masks''' ) )
            all_boxes.append(model_output.pop('''boxes''' ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
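# Usage sketch (hedged: the model id is illustrative, not taken from this file):
#     from transformers import pipeline
#     generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
#     outputs = generator("image.png")   # -> {"masks": [...], "scores": tensor([...])}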
| 78
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726" ) -> dict:
    '''Fetch the JSON record for an Open Library olid such as "isbn/0140328726".'''
    new_olid = olid.strip().strip('''/''' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = f'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg )
    return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def summarize_book(ol_book_data: dict ) -> dict:
    '''Given Open Library book data, return a summary keyed by human-readable labels.'''
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
            continue
        print(f'\nSearching Open Library for ISBN: {isbn}...\n')
        try:
            book_summary = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
            print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'Sorry, there are no results for ISBN: {isbn}.')
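# Illustrative run (network access required, and Open Library records can change
# over time): the default olid "isbn/0140328726" resolves to "Fantastic Mr Fox"
# by Roald Dahl.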
| 78
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
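# Usage sketch (hedged: mirrors the transformers BarthezTokenizer API):
#     tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     tok.tokenize("Bonjour le monde")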
| 331
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    '''Simple model computing y = a*x + b.'''
    def __init__( self ) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward( self , x ):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase ):
    '''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
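# Hedged note: this __main__ block doubles as a distributed smoke test; the
# @require_cuda test above re-launches the same file under torchrun so that
# save_state/load_state are exercised once per process and across devices.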
| 263
| 0
|
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    """Parse the command-line arguments for image generation."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-m""" , """--pretrained_model_name_or_path""" , type=str , default=None , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
    parser.add_argument(
        """-c""" , """--caption""" , type=str , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
    parser.add_argument(
        """-n""" , """--images_num""" , type=int , default=4 , help="""How much images to generate.""" , )
    parser.add_argument(
        """-s""" , """--seed""" , type=int , default=42 , help="""Seed for random process.""" , )
    parser.add_argument(
        """-ci""" , """--cuda_id""" , type=int , default=0 , help="""cuda_id.""" , )
    args = parser.parse_args()
    return args
def image_grid(imgs , rows , cols ):
    """Paste rows * cols images into a single grid image."""
    if not len(imgs ) == rows * cols:
        raise ValueError("""The specified number of rows and columns are not correct.""" )
    w , h = imgs[0].size
    grid = Image.new("""RGB""" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images(pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Run the pipeline and return a grid image plus the individual images."""
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
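# Hypothetical invocation (the model path and caption are illustrative):
#   python generate_images.py -m ./sd-model-dir -c "robotic cat with wings" -n 4 -s 42
# If ./sd-model-dir/best_model.pt exists, the neural-compressor-optimized UNet is
# loaded in its place; otherwise the stock UNet runs on the selected CUDA device.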
| 337
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''yolos'''
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs (self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation (self ):
        return 1e-4
    @property
    def default_onnx_opset (self ):
        return 12
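# Usage sketch (hedged: YolosConfig is the transformers name for the class above):
#     config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#     config.hidden_size   # -> 768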
| 337
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
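# The lazy-module pattern above keeps `import transformers` cheap: the torch-dependent
# symbols listed in _import_structure are only materialised on first attribute access,
# e.g. (sketch): `from transformers import CLIPSegForImageSegmentation`.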
| 306
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences ,padding_value ,padding_side ,sequence_length ) -> list:
    """Pad (or truncate) every sequence to `sequence_length` on the given side."""
    if isinstance(padding_value ,tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) ,padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) ,padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value ,tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value ,tuple ):
                out_tensor[i, sequence_length - len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation( char ) -> bool:
    """Return True if `char` is a punctuation character."""
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("""P""" ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification (DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        '''Collate features and pad labels, NER tags and entity spans to a common length.'''
        import torch
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["""ner_tags"""] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["""original_entity_spans"""] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
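# Worked example for the padding helper above (toy values, not from the source):
#     padding_tensor([[1, 2], [3]], -1, "right", 4)
#     # -> [[1, 2, -1, -1], [3, -1, -1, -1]]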
| 306
| 1
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
def get_parent_position( position: int ) -> int:
    '''Return the heap index of the parent of the node at `position`.'''
    return (position - 1) // 2
def get_child_left_position( position: int ) -> int:
    '''Return the heap index of the left child.'''
    return (2 * position) + 1
def get_child_right_position( position: int ) -> int:
    '''Return the heap index of the right child.'''
    return (2 * position) + 2
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : list[tuple[T, int]] = []
lowerCAmelCase : dict[T, int] = {}
lowerCAmelCase : int = 0
def __len__( self ):
"""simple docstring"""
return self.elements
def __repr__( self ):
"""simple docstring"""
return str(self.heap )
def lowercase__ ( self ):
"""simple docstring"""
return self.elements == 0
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
self.heap.append((elem, weight) )
lowerCAmelCase : Tuple = self.elements
self.elements += 1
self._bubble_up(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowerCAmelCase , lowerCAmelCase : Tuple = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.heap[0]
self._bubble_down(snake_case__ )
return elem
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = self.position_map[elem]
lowerCAmelCase : Optional[int] = (elem, weight)
if position > 0:
lowerCAmelCase : Union[str, Any] = get_parent_position(snake_case__ )
lowerCAmelCase , lowerCAmelCase : List[Any] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(snake_case__ )
else:
self._bubble_down(snake_case__ )
else:
self._bubble_down(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.position_map[elem]
if curr_pos == 0:
return None
lowerCAmelCase : List[str] = get_parent_position(snake_case__ )
lowerCAmelCase , lowerCAmelCase : str = self.heap[curr_pos]
lowerCAmelCase , lowerCAmelCase : Tuple = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(snake_case__ , snake_case__ )
return self._bubble_up(snake_case__ )
return None
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = self.position_map[elem]
lowerCAmelCase , lowerCAmelCase : List[str] = self.heap[curr_pos]
lowerCAmelCase : Optional[Any] = get_child_left_position(snake_case__ )
lowerCAmelCase : Union[str, Any] = get_child_right_position(snake_case__ )
if child_left_position < self.elements and child_right_position < self.elements:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.heap[child_left_position]
lowerCAmelCase , lowerCAmelCase : Dict = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(snake_case__ , snake_case__ )
return self._bubble_down(snake_case__ )
if child_left_position < self.elements:
lowerCAmelCase , lowerCAmelCase : int = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(snake_case__ , snake_case__ )
return self._bubble_down(snake_case__ )
else:
return None
if child_right_position < self.elements:
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(snake_case__ , snake_case__ )
return self._bubble_down(snake_case__ )
return None
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.heap[nodea_pos][0]
lowerCAmelCase : Optional[Any] = self.heap[nodea_pos][0]
lowerCAmelCase , lowerCAmelCase : Tuple = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowerCAmelCase : int = nodea_pos
lowerCAmelCase : str = nodea_pos
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : dict[T, dict[T, int]] = {}
lowerCAmelCase : int = 0
def __repr__( self ):
"""simple docstring"""
return str(self.connections )
def __len__( self ):
"""simple docstring"""
return self.nodes
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if node not in self.connections:
lowerCAmelCase : List[Any] = {}
self.nodes += 1
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
self.add_node(snake_case__ )
self.add_node(snake_case__ )
lowerCAmelCase : List[Any] = weight
lowerCAmelCase : Dict = weight
def a__ ( SCREAMING_SNAKE_CASE : GraphUndirectedWeighted[T] , ):
'''simple docstring'''
lowerCAmelCase : dict[T, int] = {node: maxsize for node in graph.connections}
lowerCAmelCase : dict[T, T | None] = {node: None for node in graph.connections}
lowerCAmelCase : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
lowerCAmelCase : Optional[Any] = priority_queue.extract_min()
lowerCAmelCase : Optional[int] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCAmelCase : List[str] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE , dist[neighbour] )
lowerCAmelCase : Optional[Any] = node
# running prim's algorithm
while not priority_queue.is_empty():
lowerCAmelCase : List[str] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCAmelCase : str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE , dist[neighbour] )
lowerCAmelCase : Tuple = node
return dist, parent
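# Complexity note: backed by the binary-heap priority queue above, Prim's algorithm
# runs in O(E log V); update_key re-heapifies via _bubble_up/_bubble_down so the
# frontier stays ordered by the weight of the cheapest connecting edge.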
| 133
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs )
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
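
# A minimal, self-contained sketch of the denoising loop these tests exercise.
# The random tensor stands in for a real UNet's output and the shapes are
# illustrative; the set_timesteps/step calls follow the public diffusers API.
def _demo_deis_loop():
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # placeholder for a denoising model call
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample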
| 133
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1)
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
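
# The seeding branch in get_dummy_inputs is a reusable pattern worth isolating:
# `mps` does not support device-local generators, so seeding falls back to the
# global (CPU) generator. A standalone sketch of that branch:
def _make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the global default generator
    return torch.Generator(device=device).manual_seed(seed)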
| 58
|
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Gaussian probability density at `x` for mean `mu` and standard deviation `sigma`."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
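    # Quick numerical sanity check beyond the doctests: the peak of the
    # standard normal density sits at 1 / sqrt(2 * pi), about 0.3989.
    assert abs(gaussian(0) - 0.3989422804014327) < 1e-12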
| 219
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    r"""
    Constructs a BLIP-2 processor which wraps a BLIP image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        """
        Prepares images for the model and tokenizes the text, forwarding the tokenizer arguments unchanged.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
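
# The __call__ branches above reduce to a simple merge rule: image features
# first, then token-level fields layered on top. A self-contained sketch of
# that rule with plain dicts (BatchEncoding.update behaves the same way):
def _merge_encodings(image_encoding: dict, text_encoding: Optional[dict]) -> dict:
    merged = dict(image_encoding)  # e.g. {"pixel_values": ...}
    if text_encoding is not None:
        merged.update(text_encoding)  # e.g. {"input_ids": ..., "attention_mask": ...}
    return merged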
| 287
|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
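
# A simplified re-implementation of the default-and-align rule asserted above
# (the real helper also validates its inputs); useful when reading the tests:
def _align_sketch(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    return list(out_features), [stage_names.index(f) for f in out_features]


# e.g. _align_sketch(None, None, ["a", "b", "c"]) -> (["c"], [2])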
| 287
| 1
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
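
# The sequence-length bookkeeping used throughout this tester, in one place:
# a ViT splits an H x W image into (H // P) * (W // P) patches and prepends a
# [CLS] token, so with the defaults above (image_size=30, patch_size=2) the
# expected sequence length is 15 * 15 + 1 = 226.
def _vit_seq_len(image_size: int = 30, patch_size: int = 2) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token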
| 213
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
        The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns a dict of all attention processors used in the model, indexed by weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
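
# The causal attention mask built in __init__ above, as a standalone sketch:
# fill with a large negative number, then triu_(1) keeps those values only
# strictly above the diagonal, so each position attends to itself and earlier
# positions once the mask is added to the attention scores.
def _build_causal_mask(seq_len: int) -> torch.Tensor:
    mask = torch.full([seq_len, seq_len], -10000.0)
    mask.triu_(1)  # zero on and below the diagonal, -10000.0 strictly above
    return mask[None, ...]  # broadcastable batch dimension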
| 213
| 1
|
"""simple docstring"""
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars;
    `full_python_path` controls whether to replicate the full interpreter path or just `python`.
    """

    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug this function without needing to run the whole benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])

    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
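
# How the --variations grammar expands, in isolation: each CLI argument is one
# dimension whose alternatives are separated by '|' (an empty alternative means
# "flag absent"), and the benchmark runs the cartesian product of all dimensions.
def _demo_variation_expansion():
    raw = ["|--fp16|--bf16", "|--tf32"]
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in raw]
    expanded = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    # -> ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']
    return expanded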
| 351
|
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string of length 32 to little-endian, in groups of 8 chars."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to little-endian hex bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Preprocesses the message: convert to a bit string, pad to a multiple of 512 bits, append length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks and yields each block as a list of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Performs bitwise NOT on the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two numbers mod 2^32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a given 32-bit int left by a given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 hash of a given message as hex bytes."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
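    # Cross-check the pure-Python digest against the standard library's MD5;
    # md5_me returns the hex digest as bytes, so the hexdigest string is encoded.
    import hashlib

    sample_message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(sample_message) == hashlib.md5(sample_message).hexdigest().encode("utf-8")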
| 132
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint's state dict onto the CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename checkpoint keys to the Transformers naming scheme."""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original VisualBERT weights into the Transformers design."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024, "num_labels": 2}
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
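
# Example invocation, assuming this script is saved as
# convert_visual_bert_checkpoint.py (the checkpoint filename must be one of
# ACCEPTABLE_CHECKPOINTS; the paths are placeholders):
#     python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visual_bert_vqa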
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
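
# Worked example, hand-checked against the textbook definition: "martha" vs.
# "marhta" has 6 matching characters, 1 transposition and a common prefix of
# length 3, so jaro = (6/6 + 6/6 + 5/6) / 3 ~ 0.9444 and the Jaro-Winkler
# score is 0.9444 + 0.1 * 3 * (1 - 0.9444) ~ 0.9611:
#     jaro_winkler("martha", "marhta")  # ~ 0.9611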
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
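
# For reference, a minimal standalone denoising loop with the scheduler API
# exercised above (set_timesteps / step); `unet` stands in for any
# noise-prediction model with a matching signature:
#
#     scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(sample, t)
#         sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample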
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
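
# Minimal usage sketch for the config above (illustrative values, not taken
# from any released checkpoint):
if __name__ == "__main__":
    demo_config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(demo_config.model_type, demo_config.pruning_method)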
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
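
# These tests follow the usual Transformers conventions: the @slow-marked
# tests only run when the RUN_SLOW environment variable is set, e.g.
#     RUN_SLOW=1 python -m pytest tests/models/albert/test_modeling_flax_albert.py
# (the path assumes the file sits in its usual place in the test tree).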
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Constructs a Bark processor wrapping a text tokenizer and optional speaker embeddings."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, "
                    "no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json "
                    "dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist, "
                    f"no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
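
# Minimal usage sketch, assuming a Bark checkpoint with bundled speaker
# embeddings such as "suno/bark-small" and the "v2/en_speaker_6" preset:
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     # inputs["history_prompt"] now holds the semantic/coarse/fine prompts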
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic encoder-decoder model, holding one config for each half."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Instantiate an EncoderDecoderConfig from an encoder and a decoder configuration."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        """Serialize this instance to a Python dictionary, including both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
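
# Minimal usage sketch pairing two BERT configs (any configs exposing
# `to_dict()` work the same way):
#     from transformers import BertConfig, EncoderDecoderConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention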
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
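
# The function evaluates the Friedmann relation
#     H(z) = H0 * sqrt(Or*(1+z)**4 + Om*(1+z)**3 + Ok*(1+z)**2 + OL)
# with Ok = 1 - (Or + Om + OL).  At z = 0 the bracket sums to exactly 1, so
# the demo above prints the input Hubble constant, 68.3.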
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
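
# For context, a minimal sketch of what the imported `kruskal(num_nodes, edges)`
# is assumed to do: sort edges by weight and keep those joining distinct
# union-find components. Illustrative only, not the imported module's code.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving keeps trees shallow
            u = parent[u]
        return u

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v  # union the two components
            mst.append([u, v, weight])
    return mst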
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


# We will verify our results on a sample image from the COCO test fixtures
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )

        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
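
# Minimal usage sketch of the ONNX config above (the "default" task is
# assumed):
#     onnx_config = RobertaOnnxConfig(RobertaConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])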
import argparse
import os

import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
# Note: the assignment targets in the three loaders below were lost in
# extraction; they are reconstructed from the T5-style module layout used by
# diffusers' spectrogram-diffusion models (layer[0] = self-attention,
# layer[1] = MLP for encoders) and should be double-checked against the
# actual modules.
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    cfg = synth_model.model.module.config

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=cfg.vocab_size, d_model=cfg.emb_dim,
        dropout_rate=cfg.dropout_rate, num_layers=cfg.num_encoder_layers, num_heads=cfg.num_heads,
        d_kv=cfg.head_dim, d_ff=cfg.mlp_dim, feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=cfg.emb_dim, dropout_rate=cfg.dropout_rate, num_layers=cfg.num_encoder_layers,
        num_heads=cfg.num_heads, d_kv=cfg.head_dim, d_ff=cfg.mlp_dim, feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=cfg.max_decoder_noise_time, d_model=cfg.emb_dim, num_layers=cfg.num_decoder_layers,
        num_heads=cfg.num_heads, d_kv=cfg.head_dim, d_ff=cfg.mlp_dim, dropout_rate=cfg.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
__snake_case : int = parser.parse_args()
main(args)
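
# Example invocation (the paths are placeholders; the script name assumes it
# is saved as convert_music_spectrogram_to_diffusers.py):
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram_diffusion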
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    # Used to change the data of a particular node
    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Set prev as the new head in order to put the old head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Check the LinkedList against a variety of data types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo driven from standard input."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 248
| 1
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row `length` units long can be filled with blocks at
    least three units long, any two blocks separated by at least one empty
    square; the completely empty row counts as one arrangement.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
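# Worked check (hedged: this recurrence matches the Project Euler 114 setup):
# for a row of length 7 the blocks may be 3..7 units long or the row left empty,
# and the recurrence above yields solution(7) == 17, the count quoted in that
# problem statement; solution() computes the length-50 answer.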
if __name__ == "__main__":
print(f'''{solution() = }''')
| 95
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1_536,
"junnyu/roformer_chinese_base": 1_536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in BertPreTokenizer.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Custom pre-tokenizers cannot be serialized; restore BertPreTokenizer first.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
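# Hedged usage sketch (not from the original file): the class swaps in a custom
# Jieba pre-tokenizer at load time and restores BertPreTokenizer before any
# serialization, because custom Python pre-tokenizers cannot be pickled.
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # segmented by Jieba, then WordPiece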
| 95
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 298
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = '''config.json'''
WEIGHTS_NAME = '''diffusion_pytorch_model.bin'''
FLAX_WEIGHTS_NAME = '''diffusion_flax_model.msgpack'''
ONNX_WEIGHTS_NAME = '''model.onnx'''
SAFETENSORS_WEIGHTS_NAME = '''diffusion_pytorch_model.safetensors'''
ONNX_EXTERNAL_WEIGHTS_NAME = '''weights.pb'''
HUGGINGFACE_CO_RESOLVE_ENDPOINT = '''https://huggingface.co'''
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = '''diffusers_modules'''
HF_MODULES_CACHE = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
DEPRECATED_REVISION_ARGS = ['''fp16''', '''non-ema''']
TEXT_ENCODER_ATTN_MODULE = '''.self_attn'''
| 298
| 1
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping into the transformers PyTorch format."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
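# Illustrative example with a hypothetical shard name: for the file
# "layer_04-model_00-model_states.pt" the regex extracts 4, the offset of the
# three embedding/norm entries is subtracted, and
# layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt")
# returns "h.1.input_layernorm.weight".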
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
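# For instance, get_dtype_size(torch.float16) == 2 and get_dtype_size(torch.int64) == 8,
# since the digits trailing the dtype name give its width in bits; torch.bool is
# special-cased as 1/8 byte (one bit per element).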
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
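# Hedged summary of the merge rule implemented above (not part of the original
# script): parameters matched by WEIGHTS_TO_AVERAGE_ENDSWITH (layer norms and the
# listed biases) are replicated across tensor-parallel ranks, so they are summed
# and divided by `pretraining_tp`; everything else is sharded, so the pieces are
# concatenated instead: dim=1 for row-parallel weights (self_attention.dense,
# mlp.dense_4h_to_h) and dim=0 for column-parallel ones.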
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 161
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 161
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
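# Illustrative note (not part of the original file): via `attribute_map` above,
# the generic configuration names resolve to the GPT-style fields, e.g. for a
# default CodeGenConfig(), config.hidden_size == config.n_embd == 4096 and
# config.num_hidden_layers == config.n_layer == 28.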
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
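# Illustrative shape check (hypothetical sizes): with the defaults n_head=16 and
# n_embd=4096, a dummy input_ids batch of shape (2, 7) makes each past key/value
# tensor above zeros of shape (2, 16, 9, 256), i.e.
# (batch, n_head, seqlen + 2, n_embd // n_head).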
| 133
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 133
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'MIT/ast-finetuned-audioset-10-10-0.4593': (
        'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
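# Hedged note (based on how AST models typically consume these settings): the
# spectrogram is cut into patch_size x patch_size patches using the two strides,
# so each axis yields roughly (size - patch_size) // stride + 1 positions:
#   frequency: (128 - 16) // 10 + 1 = 12
#   time:      (1024 - 16) // 10 + 1 = 101
# for 12 * 101 = 1212 patch tokens before any special tokens are prepended.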
| 303
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 303
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase_ = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 244
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a forward pass, deriving any masks not supplied."""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
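# Illustrative example (not in the original file): with config.pad_token_id == 1,
# input_ids of [[5, 6, 2, 1]] yield attention_mask [[True, True, True, False]]
# through input_ids.ne(config.pad_token_id), masking the padded position out.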
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = bos_token_id
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = self.eos_token_id # Eos Token
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ = self.get_config()
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return config, inputs_dict
def UpperCAmelCase_ (self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MaMaaaModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder().to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = inputs_dict["""input_ids"""]
UpperCamelCase__ = inputs_dict["""attention_mask"""]
UpperCamelCase__ = inputs_dict["""head_mask"""]
# first forward pass
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
"""last_hidden_state"""
]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 ) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MaMaaaModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = outputs.encoder_last_hidden_state
UpperCamelCase__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaEncoder.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaDecoder.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertEqual(info["""missing_keys"""] , [] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not self.is_encoder_decoder:
UpperCamelCase__ = inputs["""input_ids"""]
del inputs["input_ids"]
else:
UpperCamelCase__ = inputs["""input_ids"""]
UpperCamelCase__ = inputs.get("""decoder_input_ids""" , SCREAMING_SNAKE_CASE_ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase__ = wte(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = wte(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = wte(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
model(**SCREAMING_SNAKE_CASE_ )[0]
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = input_dict["""input_ids"""]
UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval().to(SCREAMING_SNAKE_CASE_ )
if torch_device == "cuda":
model.half()
model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE_ , early_stopping=SCREAMING_SNAKE_CASE_ , num_return_sequences=3 )
def __magic_name__ ( __a : List[Any] ):
'''simple docstring'''
return torch.tensor(__a , dtype=torch.long , device=__a )
lowerCamelCase_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ (self ):
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
UpperCamelCase__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase__ = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
# change to expected output here
UpperCamelCase__ = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(SCREAMING_SNAKE_CASE_ )
# change to intended input
UpperCamelCase__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
UpperCamelCase__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase__ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
# change to expected output here
UpperCamelCase__ = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
UpperCamelCase__ = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
UpperCamelCase__ = model.generate(
input_ids=dct["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) , attention_mask=dct["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
UpperCamelCase__ = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
UpperCamelCase__ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
assert generated == expected_en
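# --- Illustrative usage sketch (an addition, not part of the test suite above) ---
# The integration test above exercises FR->EN translation with beam search and a
# forced BOS language token. A minimal standalone sketch of that flow, assuming
# the canonical transformers class names (the obfuscated `MaMaaa*` identifiers
# above correspond to the library's `M2M100*` classes):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

batch = m2m_tokenizer(
    "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    return_tensors="pt",
)
# forced_bos_token_id pins the first generated token to the target-language id.
translated = m2m_model.generate(
    **batch, num_beams=5, forced_bos_token_id=m2m_tokenizer.get_lang_id("en")
)
print(m2m_tokenizer.batch_decode(translated, skip_special_tokens=True))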
| 244
| 1
|
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
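# --- Sanity-check sketch (an addition, not part of the original script) ---
# catalan_number(n) should equal the number of distinct binary tree shapes on
# n nodes; the brute-force recursion below counts those shapes directly,
# assuming the fixed functions above are in scope.
def brute_force_tree_count(n: int) -> int:
    # A tree is a root plus a left subtree of size `left` and a right subtree
    # of size n - 1 - left.
    if n <= 1:
        return 1
    return sum(brute_force_tree_count(left) * brute_force_tree_count(n - 1 - left) for left in range(n))


assert all(brute_force_tree_count(n) == catalan_number(n) for n in range(1, 10))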
| 368
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings.weights[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
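# --- Illustrative sketch (an addition, not part of the conversion script) ---
# set_param above copies a tensor into a torch layer only when the shapes
# already agree; the toy example below shows that contract, assuming the fixed
# set_param definition above is in scope.
toy_layer = nn.Linear(4, 3)
new_weight = torch.zeros(3, 4)  # matches toy_layer.weight.shape
new_bias = torch.ones(3)        # matches toy_layer.bias.shape
set_param(toy_layer, new_weight, new_bias)
assert torch.equal(toy_layer.weight, new_weight) and torch.equal(toy_layer.bias, new_bias)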
| 172
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=32 * 4 , lowerCAmelCase__=32 * 6 , lowerCAmelCase__=4 , lowerCAmelCase__=32 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_auxiliary_loss
SCREAMING_SNAKE_CASE = num_queries
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_size
SCREAMING_SNAKE_CASE = max_size
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = mask_feature_size
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
SCREAMING_SNAKE_CASE = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = output.encoder_hidden_states
SCREAMING_SNAKE_CASE = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_config.decoder_layers )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
with torch.no_grad():
SCREAMING_SNAKE_CASE = MaskFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
SCREAMING_SNAKE_CASE = MaskFormerForInstanceSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : str = False
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __A ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def __A ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def __A ( self ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __A ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __A ( self ) -> str:
pass
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def __A ( self ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE = MaskFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCAmelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(),
}
SCREAMING_SNAKE_CASE = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def __A ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def __A ( self ) -> Any:
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
SCREAMING_SNAKE_CASE = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase = 1E-4
def lowercase () -> Optional[Any]:
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowerCAmelCase__ )
.eval()
)
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
# masks_queries_logits
SCREAMING_SNAKE_CASE = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
SCREAMING_SNAKE_CASE = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(lowerCAmelCase__ )
.eval()
)
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
# masks_queries_logits
SCREAMING_SNAKE_CASE = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
SCREAMING_SNAKE_CASE = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowerCAmelCase__ )
.eval()
)
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [el.to(lowerCAmelCase__ ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE = [el.to(lowerCAmelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
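# --- Illustrative inference sketch (an addition, not part of the tests) ---
# The integration tests above check raw mask/class logits; in practice those
# per-query logits are collapsed into a segmentation map via the image
# processor. A minimal sketch, assuming the same public checkpoint:
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# One (height, width) tensor of class ids per input image, resized back to the
# original resolution; PIL's image.size is (width, height), hence the reversal.
semantic_map = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(semantic_map.shape)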
| 113
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str) -> None:
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
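# --- Spot-check sketch (an addition, not part of the conversion script) ---
# The PyTorch -> TF1 renaming is pure string rewriting; a standalone copy of
# the nested to_tf_var_name helper makes the mapping easy to eyeball without
# building a model.
VAR_MAP = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)


def to_tf_var_name_standalone(name: str) -> str:
    for patt, repl in VAR_MAP:
        name = name.replace(patt, repl)
    return f"bert/{name}"


# -> bert/encoder/layer_0/attention/self/query/kernel
print(to_tf_var_name_standalone("encoder.layer.0.attention.self.query.weight"))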
| 113
| 1
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=56 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="block_sparse" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , )-> str:
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_attention_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_choices
lowerCamelCase_ =rescale_embeddings
lowerCamelCase_ =attention_type
lowerCamelCase_ =use_bias
lowerCamelCase_ =block_size
lowerCamelCase_ =num_random_blocks
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ =None
if self.use_attention_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ =BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =self.prepare_config_and_inputs()
lowerCamelCase_ =config_and_inputs
lowerCamelCase_ ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class _SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase):
_UpperCamelCase:Tuple = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
_UpperCamelCase:Any = False
_UpperCamelCase:Any = False
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _snake_case ( self )-> str:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _snake_case ( self )-> str:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _snake_case ( self )-> str:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _snake_case ( self )-> int:
super().test_hidden_states_output()
@slow
def _snake_case ( self )-> List[Any]:
for model_class_name in self.all_model_classes:
lowerCamelCase_ =model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ( self )-> Any:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _snake_case ( self )-> Any:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ =self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_ =model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
return model(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , **_UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase_ =model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase_ =model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE="outputs" , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the
# PyTorch version an effort was made to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
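# --- Illustrative sketch (an addition, not part of the test file) ---
# The JIT test above compares jitted and non-jitted outputs of the same model
# call; the same pattern on a toy function, assuming only jax is installed:
import jax
import jax.numpy as jnp


def scaled_sum(x):
    return (x * 2.0).sum()


jitted_fn = jax.jit(scaled_sum)
x = jnp.arange(6.0)
with jax.disable_jit():
    eager_out = jitted_fn(x)  # runs eagerly inside disable_jit()
assert jnp.allclose(jitted_fn(x), eager_out)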
| 368
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
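# --- Spot-check sketch (an addition, not part of the conversion script) ---
# rename_state_dict_key is pure string rewriting, so the ParlAI -> HF key
# mapping can be eyeballed without loading a checkpoint (assumes the fixed
# definitions above are in scope).
for parlai_key in [
    "embeddings.weight",                        # -> shared.weight
    "encoder.layers.0.attention.q_lin.weight",  # -> encoder.layers.0.self_attn.q_proj.weight
    "decoder.layers.0.norm3.bias",              # -> decoder.layers.0.final_layer_norm.bias
]:
    print(parlai_key, "->", rename_state_dict_key(parlai_key))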
| 49
| 0
|