import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the Exponential Linear Unit (ELU) activation element-wise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
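# Quick sanity check of the restored function (the names `vector` and `alpha`
# were recovered from the function body's own references):
#   exponential_linear_unit(np.array([2.3, 0.6, -2.0]), alpha=0.3)
#   -> array([ 2.3,  0.6, -0.2594])   # last value is 0.3 * (e**-2 - 1)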
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: "1", "1/2", ..., "1/n"."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
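# Example: harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']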
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal language modeling head."""

    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
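# Example usage of this pipeline through the public `transformers` API
# (a minimal sketch; "gpt2" is just one causal-LM checkpoint that works here):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)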
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
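# Example (performs live HTTP requests against the Hacker News API):
#   print(hackernews_top_stories_as_markdown(3))
# prints three markdown bullets of the form "* [Story title](https://...)",
# one per top story, following the format string above.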
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
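# Example usage through the public `transformers` package (a minimal sketch;
# these defaults reproduce a ResNet-50-style configuration):
#   from transformers import ResNetConfig, ResNetModel
#   config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
#   model = ResNetModel(config)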
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
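# Example usage (a minimal sketch; builds a BLIP-2 config from sub-configs with
# library defaults):
#   from transformers import Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )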
"""Processor class for BridgeTower, wrapping an image processor and a Roberta tokenizer."""

from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin style) primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division; `d` must stay an int for modular exponentiation
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
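# Note: `bin_exp_mod(a, d, n)` is modular exponentiation from a sibling module.
# If that module is unavailable, Python's builtin is a drop-in replacement:
#   b = pow(a, d, n)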
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting digit/comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
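# Example usage (downloads the SentencePiece model for the public checkpoint
# named in PRETRAINED_VOCAB_FILES_MAP above):
#   from transformers import XLNetTokenizer
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   tokenizer.tokenize("Hello world!")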
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power (P) from apparent power (S) and power factor.

    >>> real_power(100, 0.9)
    90.0
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (Q) from apparent power (S) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
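# Example: for a 100 VA load at power factor 0.8,
#   real_power(100, 0.8)      -> 80.0
#   reactive_power(100, 0.8)  -> ~60.0  (100 * sqrt(1 - 0.8**2))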
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    # We will verify our results on an image of two cats from the COCO validation set
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    # Map TF block names (e.g. "block1a") to sequential HF block indices
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into our EfficientNet structure."""
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
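
# Illustrative invocation (added; file name and model identifiers are assumptions,
# not taken from the original script): consolidating a RAG-sequence checkpoint
# from a BART generator and a DPR question encoder would look roughly like this:
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated
#
# The tokenizers land in dest/generator_tokenizer/ and dest/question_encoder_tokenizer/.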
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
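
# Worked example (added for illustration): with apparent power S = 100 VA and
# power factor cos(phi) = 0.9, real_power(100, 0.9) returns P = 90.0 W and
# reactive_power(100, 0.9) returns Q = 100 * sqrt(1 - 0.81), approximately
# 43.59 VAR, consistent with the power triangle S**2 = P**2 + Q**2.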
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""")

    @classmethod
    def pip_install(cls):
        return f"""`pip install {cls.pip_package or cls.name}`"""


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""")
        return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
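
# Minimal usage sketch (added; assumes optuna is installed): the mapping above
# lets callers resolve a backend by name and fail early when the package is
# missing.
#
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]()
#   backend.ensure_available()  # raises RuntimeError with a pip hint if absent
#   # Trainer.hyperparameter_search(...) then dispatches to backend.run(...)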
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
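
# Illustrative invocation (added; the file name is a placeholder): converting
# the distilled base DeiT checkpoint from timm into the HuggingFace format:
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
#
# The script asserts that timm and HF logits agree within 1e-3 before saving.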
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
import random
def _partition(data, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
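
# Illustrative usage (added; not part of the original module): quick_select
# returns the element that would sit at `index` in the sorted list, so the
# middle index yields the median of an odd-length list in expected O(n) time.
if __name__ == "__main__":
    items = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(items, len(items) // 2))  # 7, the median of the sorted list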
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""")
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
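
# Minimal usage sketch (added; the checkpoint name is illustrative): the
# pipeline returns the model's last hidden state as nested Python lists of
# shape [batch, sequence_length, hidden_size] unless return_tensors=True.
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a simple test.")
#   print(len(features[0]), len(features[0][0]))  # sequence length, hidden size (768 for DistilBERT)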
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
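
# Worked check (added for illustration): solution() computes lcm(1, ..., 20),
# the smallest positive number evenly divisible by every integer from 1 to 20,
# which is 232792560 (Project Euler problem 5).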
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
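
# Illustrative invocation (added; paths are placeholders, not from the original file):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path ./t5-pytorch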
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_efficientformer': [
        'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientFormerConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
        'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientFormerForImageClassification',
        'EfficientFormerForImageClassificationWithTeacher',
        'EfficientFormerModel',
        'EfficientFormerPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
        'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFEfficientFormerForImageClassification',
        'TFEfficientFormerForImageClassificationWithTeacher',
        'TFEfficientFormerModel',
        'TFEfficientFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
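
# Note on the pattern above (added commentary): _LazyModule defers the heavy
# torch/TF imports until an attribute is first accessed, so e.g.
#
#   from transformers.models.efficientformer import EfficientFormerConfig
#
# only pays for the configuration module; the modeling files load on demand.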
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
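
# Complexity note (added): the naive scan compares the pattern at every start
# position, so the worst case is O(len(s) * len(pattern)). For the example
# above, 'ABC' is found at positions [4, 10, 18] in 'ABAAABCDBBABCDDEBCABC'.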
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states, n_docs):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
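
# Data-flow summary (added commentary): in the distributed path, every worker
# gathers its question embeddings to rank 0, rank 0 queries the index once for
# the whole batch, and the doc ids / doc embeddings are scattered back in
# per-worker chunks, so only the main process needs the (large) index in memory.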
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target; target is the house price
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
_lowerCamelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
_lowerCamelCase : str = tempfile.mkdtemp()
_lowerCamelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase__ )
_lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCamelCase : Optional[int] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase__ )
_lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCamelCase : Tuple = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
_lowerCamelCase : Dict = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase : Dict = tokenizer_r.from_pretrained(UpperCamelCase__ )
_lowerCamelCase : Dict = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase : List[Any] = tempfile.mkdtemp()
_lowerCamelCase : str = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
_lowerCamelCase : str = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase : List[str] = tokenizer_r.from_pretrained(UpperCamelCase__ )
_lowerCamelCase : List[str] = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = """facebook/mbart-large-en-ro"""
lowerCamelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCamelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCamelCase__ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def A_ ( cls ):
cls.tokenizer : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
cls.pad_token_id = 1
return cls
def A_ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_XX'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def A_ ( self ):
ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , ids )
def A_ ( self ):
self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_romanian )
self.assertNotIn(self.tokenizer.eos_token , result )
def A_ ( self ):
_lowerCamelCase : Any = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , UpperCamelCase__ )
_lowerCamelCase : Union[str, Any] = 10
_lowerCamelCase : List[str] = self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
def A_ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def A_ ( self ):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MBartTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def A_ ( self ):
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def A_ ( self ):
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def A_ ( self ):
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt' )
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A_ ( self ):
inputs = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
| 96
|
from math import sqrt
def solution ( limit = 100_0000 ) -> int:
num_cuboids : int = 0
max_cuboid_size : int = 0
sum_shortest_sides : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(max_cuboid_size ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
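# Worked example (added for illustration): for a cuboid with sides a <= b <= c, the
# shortest surface path has length sqrt((a + b)**2 + c**2); the loop above counts, for
# each candidate largest side c (max_cuboid_size), the integer solutions grouped by
# a + b (sum_shortest_sides). E.g. the classic 3x5x6 cuboid gives
# sqrt((3 + 5)**2 + 6**2) = sqrt(100) = 10, an integer, so it is counted.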
| 48
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> List[Any]:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def _snake_case ( self : str ) -> Dict:
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self : int ) -> Tuple:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase_ ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : str = True
UpperCamelCase_ : List[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
input_ids = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
output = model(input_ids )[0]
expected_shape = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , expected_shape )
# compare the actual values for a slice.
expected_slice = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def _snake_case ( self : Dict ) -> Optional[int]:
'''simple docstring'''
model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=True )
input_ids = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
output = model(input_ids )[0]
# compare the actual values for a slice.
expected_slice = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 319
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer (BaseTransformer ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """sequence-classification"""
def __init__( self , hparams ) -> List[Any]:
if type(hparams ) == dict:
hparams = Namespace(**hparams )
hparams.glue_output_mode = glue_output_modes[hparams.task]
num_labels = glue_tasks_num_labels[hparams.task]
super().__init__(hparams , num_labels , self.mode )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , batch , batch_idx ) -> Tuple:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
outputs = self(**inputs )
loss = outputs[0]
lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ["train", "dev"]:
cached_features_file = self._feature_file(mode )
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , cached_features_file )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
examples = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
features = convert_examples_to_features(
examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , cached_features_file )
torch.save(features , cached_features_file )
def _lowercase ( self , mode , batch_size , shuffle = False ) -> DataLoader:
mode_name = "dev" if mode == "test" else mode
cached_features_file = self._feature_file(mode_name )
logger.info("Loading features from cached file %s" , cached_features_file )
features = torch.load(cached_features_file )
all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
all_labels = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
all_labels = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
def _lowercase ( self , batch , batch_idx ) -> List[Any]:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
outputs = self(**inputs )
tmp_eval_loss , logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self , outputs ) -> tuple:
val_loss_mean = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
preds = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
preds = np.argmax(preds , axis=1 )
elif self.hparams.glue_output_mode == "regression":
preds = np.squeeze(preds )
out_label_ids = np.concatenate([x["target"] for x in outputs] , axis=0 )
out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
preds_list = [[] for _ in range(out_label_ids.shape[0] )]
results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
ret = dict(results.items() )
ret["log"] = results
return ret, preds_list, out_label_list
def _lowercase ( self , outputs ) -> dict:
ret , preds , targets = self._eval_end(outputs )
logs = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , outputs ) -> dict:
ret , predictions , targets = self._eval_end(outputs )
logs = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( parser , root_dir ) -> int:
BaseTransformer.add_model_specific_args(parser , root_dir )
parser.add_argument(
"--max_seq_length" , default=128 , type=int , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=str , required=True , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=int , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A ( ) -> int:
parser = argparse.ArgumentParser()
add_generic_args(parser ,os.getcwd() )
parser = GLUETransformer.add_model_specific_args(parser ,os.getcwd() )
args = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
args.output_dir = os.path.join(
"./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
model = GLUETransformer(args )
trainer = generic_train(model ,args )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
checkpoints = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=True ) )
model = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(model )
if __name__ == "__main__":
main()
| 48
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader ( AbstractDatasetInputStream ):
"""simple docstring"""
def __init__( self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs , ) -> List[Any]:
'''simple docstring'''
super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
self.builder = Sql(
cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
# Build dataset for splits
dataset = self.builder.as_dataset(
split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter :
"""simple docstring"""
def __init__( self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs , ) -> Any:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def lowercase_ ( self ) -> int:
'''simple docstring'''
_ = self.to_sql_kwargs.pop('sql' , None )
_ = self.to_sql_kwargs.pop('con' , None )
index = self.to_sql_kwargs.pop('index' , False )
written = self._write(index=index , **self.to_sql_kwargs )
return written
def lowercase_ ( self , args ) -> str:
'''simple docstring'''
offset , index , to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
df = batch.to_pandas()
num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
return num_rows or len(df )
def lowercase_ ( self , index , **to_sql_kwargs ) -> int:
'''simple docstring'''
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
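# Illustrative usage sketch (added for clarity, not part of the original module): the
# table name "my_table" and the file path are hypothetical.
#
# from datasets import Dataset
#
# ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
# con = sqlite3.connect("example.db")
# written = SqlDatasetWriter(ds, "my_table", con, batch_size=1000).write()
# con.close()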
| 90
|
def method_a ( boundary ,steps ) -> Any:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
h = (boundary[1] - boundary[0]) / steps
a = boundary[0]
b = boundary[1]
x_i = make_points(a ,b ,h )
y = 0.0
y += (h / 2.0) * f(a )
for i in x_i:
# print(i)
y += h * f(i )
y += (h / 2.0) * f(b )
return y
def make_points ( a ,b ,h ) -> int:
x = a + h
while x < (b - h):
yield x
x = x + h
def f ( x ) -> Optional[Any]: # enter your function here
y = (x - 0) * (x - 0)
return y
def main ( ) -> int:
a = 0.0 # Lower bound of integration
b = 1.0 # Upper bound of integration
steps = 10.0 # define number of steps or resolution
boundary = [a, b] # define boundary of integration
y = method_a(boundary ,steps )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
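# Worked example (added for illustration): with f(x) = x**2 on [0, 1] and 10 steps,
# h = 0.1 and the rule gives y = h/2*(f(0) + f(1)) + h*(f(0.1) + ... + f(0.9)) ≈ 0.335,
# close to the exact integral 1/3; the error shrinks as O(h**2) as steps increase.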
| 48
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ) -> List[Any]:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.patch_size = patch_size
self.max_length = max_length
self.num_mel_bins = num_mel_bins
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.frequency_stride = frequency_stride
self.time_stride = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
num_patches = frequency_out_dimension * time_out_dimension
self.seq_length = num_patches + 2
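# Worked example (added for illustration): with the defaults above (num_mel_bins=16,
# max_length=24, patch_size=2, both strides=2), frequency_out_dimension =
# (16 - 2) // 2 + 1 = 8 and time_out_dimension = (24 - 2) // 2 + 1 = 12, so
# num_patches = 96 and the sequence length is 98.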
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, input_values, labels
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowercase__ ( self : int , config : List[str] , input_values : Optional[Any] , labels : int ) -> Dict:
'''simple docstring'''
model = ASTModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_values , labels = config_and_inputs
inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__snake_case = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.model_tester = ASTModelTester(self )
self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ASTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
filepath = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""", filename="""sample_audio.flac""", repo_type="""dataset""" )
audio , sampling_rate = torchaudio.load(filepath )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
feature_extractor = self.default_feature_extractor
model = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(torch_device )
feature_extractor = self.default_feature_extractor
audio , sampling_rate = prepare_audio()
audio = audio.squeeze().numpy()
inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="""pt""" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 134
|
def solution ( n = 100_0000 ) -> int:
largest_number = 1
pre_counter = 1
counters = {1: 1}
for inputa in range(2 ,n ):
counter = 0
number = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
number = (3 * number) + 1
counter += 1
if inputa not in counters:
counters[inputa] = counter
if counter > pre_counter:
largest_number = inputa
pre_counter = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
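# Worked example (added for illustration): the Collatz chain starting at 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, so the memoised value stored in
# `counters` for 13 is 10 (the number of terms, since counters[1] is initialised to 1).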
| 48
| 0
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowercase ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPPipeline
_SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : int = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_SCREAMING_SNAKE_CASE : Optional[int] = False
def a ( self : Dict ) -> str:
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
prior_tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
prior = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0 )
prior_scheduler = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
image_noising_scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
unet = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
vae = AutoencoderKL()
components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def a ( self : Union[str, Any] , device : int , seed : Optional[int]=0 ) -> str:
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def a ( self : Union[str, Any] ) -> int:
test_max_difference = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def a ( self : str ) -> Optional[Any]:
test_max_difference = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : int ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> str:
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
output = pipe("""anime turtle""" , generator=generator , output_type="""np""" )
image = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(image , expected_image )
def a ( self : Optional[int] ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 229
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping ( fname ,overwrite = False ) -> int:
with open(fname ,"r" ,encoding="utf-8" ) as f:
content = f.read()
lines = content.split("\n" )
new_lines = []
line_idx = 0
while line_idx < len(lines ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
indent = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
blocks = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
start_idx = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
blocks = sorted(blocks ,key=lambda block : _re_identifier.search(block ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(fname ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(new_lines ) )
elif "\n".join(new_lines ) != content:
return True
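# Example (added for illustration): inside a mapping such as
# MODEL_MAPPING_NAMES = OrderedDict([("roberta", ...), ("albert", ...)]),
# the sorter reorders the entries alphabetically by their string identifier,
# yielding [("albert", ...), ("roberta", ...)].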
def sort_all_auto_mappings ( overwrite = False ) -> List[str]:
fnames = [os.path.join(PATH_TO_AUTO_MODULE ,f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
diffs = [sort_auto_mapping(fname ,overwrite=overwrite ) for fname in fnames]
if not overwrite and any(diffs ):
failures = [f for f, d in zip(fnames ,diffs ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 48
| 0
|
def lowerCAmelCase_ ( number_of_steps )-> int:
'''simple docstring'''
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
current , previous = 1, 1
for _ in range(number_of_steps - 1 ):
current , previous = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
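# Example (added for illustration): this is the climbing-stairs recurrence (a shifted
# Fibonacci sequence), so the first values are f(1)=1, f(2)=2, f(3)=3, f(4)=5, f(5)=8.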
| 348
|
def harmonic_series ( n_term ) -> list:
if n_term == "":
return []
series : list = []
for temp in range(int(n_term ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
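# Example (added for illustration): entering 5 prints
# ['1', '1/2', '1/3', '1/4', '1/5'] -- the first five terms of the harmonic series.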
| 48
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("""fixtures""")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
response_mock = mock.Mock()
response_mock.status_code = 5_0_0
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase_ ( cls ):
"""simple docstring"""
cls._token = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def lowerCAmelCase_ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_feature_extractor , k ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v , getattr(new_feature_extractor , k ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
new_feature_extractor = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=True )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 140
|
from __future__ import annotations
import requests
def get_hackernews_story ( story_id ) -> dict:
url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(url ).json()
def hackernews_top_stories ( max_stories = 10 ) -> list[dict]:
url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
story_ids = requests.get(url ).json()[:max_stories]
return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown ( max_stories = 10 ) -> str:
stories = hackernews_top_stories(max_stories )
return "\n".join("* [{title}]({url})".format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 48
| 0
|
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE_ ( DiffusionPipeline ):
def __init__( self : Any , unet : List[str] , scheduler : List[Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
def __call__( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
image = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
timestep = 1
model_output = self.unet(image , timestep ).sample
scheduler_output = self.scheduler.step(model_output , timestep , image ).prev_sample
result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
return result
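# Illustrative usage sketch (added for clarity, not part of the original file): the
# pipeline only needs a UNet and a scheduler, so wiring it up with small dummy
# components could look like the following; the component sizes are assumptions.
#
# from diffusers import UNet2DModel, DDPMScheduler
# unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# pipe = SCREAMING_SNAKE_CASE_(unet, scheduler)
# result = pipe()  # a tensor of ones with the UNet's sample shape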
| 327
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class BlipaVisionConfig (PretrainedConfig ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model"""
def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ) -> Optional[Any]:
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class BlipaQFormerConfig (PretrainedConfig ):
'''simple docstring'''
lowerCamelCase_ : Dict = """blip_2_qformer"""
def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ) -> int:
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.cross_attention_frequency = cross_attention_frequency
self.encoder_hidden_size = encoder_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """blip-2"""
lowerCamelCase_ : int = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str:
super().__init__(**UpperCamelCase__ )
if vision_config is None:
lowerCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
lowerCamelCase : List[Any] = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
lowerCamelCase : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ )
lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ )
lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings
lowerCamelCase : int = self.text_config.is_encoder_decoder
lowerCamelCase : Optional[Any] = num_query_tokens
lowerCamelCase : int = self.vision_config.hidden_size
lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase : Dict = 1.0
lowerCamelCase : List[Any] = 0.02
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase : Tuple = self.vision_config.to_dict()
lowerCamelCase : int = self.qformer_config.to_dict()
lowerCamelCase : Optional[Any] = self.text_config.to_dict()
lowerCamelCase : int = self.__class__.model_type
return output
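
# --- Usage sketch (added for illustration; not part of the original module) ---
# The equivalent public classes are exported at the top level of `transformers`:
#
#     from transformers import Blip2Config
#
#     config = Blip2Config(num_query_tokens=32)
#     # The Q-Former cross-attends over vision features, so its encoder width is
#     # copied from the vision tower at construction time:
#     assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
#     config_dict = config.to_dict()  # round-trips the nested sub-configs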
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort; only defined for sequences of non-negative integers.

    >>> bead_sort([6, 11, 12, 4, 1, 5])
    [1, 4, 5, 6, 11, 12]
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the surplus beads on the upper rod fall onto the lower rod
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
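    # Quick demo (added): at most len(sequence) passes are needed before the
    # beads settle, giving O(n^2) element moves on a list of length n.
    print(bead_sort([5, 2, 4, 1, 3]))  # [1, 2, 3, 4, 5]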
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin style probabilistic primality test using `prec` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
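
# Sanity check (added for illustration): on small inputs the probabilistic test
# agrees with plain trial division; raising `prec` shrinks the error probability.
#
#     all(is_prime_big(k) == all(k % d != 0 for d in range(2, k)) for k in range(2, 200))  # True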
"""Tokenization classes for the XLNet model."""

import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet; special tokens go at the end of sequences."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is rebuilt in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting digits glued to a trailing comma."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) back to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """An XLNet sequence has the format ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
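
# --- Usage sketch (added; not part of the original file) ---
# Loading the published vocabulary needs network access plus the `sentencepiece`
# package; note that XLNet appends <sep> and <cls> at the *end* of the sequence.
#
#     from transformers import XLNetTokenizer
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))  # [..., '<sep>', '<cls>']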
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
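
# Example (added for illustration): callers inside the library guard optional
# imports like this; `hint` is only appended to the error message on a mismatch.
#
#     dep_version_check("numpy")
#     dep_version_check("tokenizers", hint="pip install -U tokenizers")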
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    # standard COCO test image used across the conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # the classification head lives outside the "block"/"stem"/"top" namespaces
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model with pretrained ImageNet weights
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
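
# Example invocation (added; the script filename is an assumption):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model
#
# The first run downloads the TF `imagenet` weights and the ImageNet label file.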
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class lowerCAmelCase__ ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ = """openai-gpt"""
lowerCamelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=40478 , lowercase=512 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-5 , lowercase=0.02 , lowercase="cls_index" , lowercase=True , lowercase=None , lowercase=True , lowercase=0.1 , **lowercase , ):
_lowerCamelCase : str = vocab_size
_lowerCamelCase : str = n_positions
_lowerCamelCase : List[Any] = n_embd
_lowerCamelCase : int = n_layer
_lowerCamelCase : int = n_head
_lowerCamelCase : Tuple = afn
_lowerCamelCase : List[str] = resid_pdrop
_lowerCamelCase : Any = embd_pdrop
_lowerCamelCase : Union[str, Any] = attn_pdrop
_lowerCamelCase : Union[str, Any] = layer_norm_epsilon
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Any = summary_type
_lowerCamelCase : int = summary_use_proj
_lowerCamelCase : Union[str, Any] = summary_activation
_lowerCamelCase : Union[str, Any] = summary_first_dropout
_lowerCamelCase : Union[str, Any] = summary_proj_to_labels
super().__init__(**UpperCamelCase__ )
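
# Usage sketch (added): `attribute_map` lets generic code read the GPT-1 fields
# under the common names, e.g. `hidden_size` resolves to `n_embd`.
#
#     config = OpenAIGPTConfig()
#     assert config.hidden_size == config.n_embd == 768
#     assert config.max_position_embeddings == config.n_positions == 512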
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A: str = {"input_ids": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A,  # `A` is the expected-encoding dict on the long literal line above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (VAR) from apparent power (VA) and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
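
# Worked example (added): a 100 VA load at power factor 0.9 splits into
# P = S * pf = 90.0 W of real power and Q = S * sqrt(1 - pf**2) ≈ 43.589 VAR
# of reactive power, consistent with S**2 = P**2 + Q**2.
#
#     real_power(100, 0.9)      # 90.0
#     reactive_power(100, 0.9)  # 43.58898943540674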
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
snake_case_ = ["""input_values""", """attention_mask"""]
def __init__( self , lowerCamelCase__ = 1 , lowerCamelCase__ = 16_000 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = False , lowerCamelCase__ = 80 , lowerCamelCase__ = 16 , lowerCamelCase__ = 64 , lowerCamelCase__ = "hann_window" , lowerCamelCase__ = 1.0 , lowerCamelCase__ = 80 , lowerCamelCase__ = 7_600 , lowerCamelCase__ = 1e-10 , lowerCamelCase__ = 2 , lowerCamelCase__ = True , **lowerCamelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ )
__lowerCamelCase = do_normalize
__lowerCamelCase = return_attention_mask
__lowerCamelCase = num_mel_bins
__lowerCamelCase = hop_length
__lowerCamelCase = win_length
__lowerCamelCase = win_function
__lowerCamelCase = frame_signal_scale
__lowerCamelCase = fmin
__lowerCamelCase = fmax
__lowerCamelCase = mel_floor
__lowerCamelCase = reduction_factor
__lowerCamelCase = win_length * sampling_rate // 1_000
__lowerCamelCase = hop_length * sampling_rate // 1_000
__lowerCamelCase = optimal_fft_length(self.sample_size )
__lowerCamelCase = (self.n_fft // 2) + 1
__lowerCamelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCamelCase__ )
__lowerCamelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , UpperCamelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , UpperCamelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
__lowerCamelCase = np.array(UpperCamelCase__ , np.intaa )
__lowerCamelCase = []
for vector, length in zip(UpperCamelCase__ , attention_mask.sum(-1 ) ):
__lowerCamelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__lowerCamelCase = padding_value
normed_input_values.append(UpperCamelCase__ )
else:
__lowerCamelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def lowercase_ ( self , lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = spectrogram(
UpperCamelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
__lowerCamelCase = self._process_audio(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , )
else:
__lowerCamelCase = None
if audio_target is not None:
__lowerCamelCase = self._process_audio(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , )
if inputs is None:
return inputs_target
else:
__lowerCamelCase = inputs_target["input_values"]
__lowerCamelCase = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
__lowerCamelCase = decoder_attention_mask
return inputs
    def _process_audio(
        self,
        speech,
        is_target: bool = False,
        padding=False,
        max_length=None,
        truncation: bool = False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        """Shared preprocessing for input waveforms and target spectrograms."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(waveform, dtype=np.float32) for waveform in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            padded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            padded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            padded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            input_values = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            input_values = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            input_values = input_values.astype(np.float32)
        padded_inputs["input_values"] = input_values

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
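# Illustrative usage sketch (hedged): assuming the class above is the SpeechT5
# feature extractor, one call can produce both model inputs and target mel
# spectrograms. The extractor construction and the 16 kHz rate below are
# assumptions for illustration, not part of this file:
#
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()        # hypothetical instantiation
#   waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
#   batch = extractor(audio=waveform, audio_target=waveform, sampling_rate=16000)
#   # batch["input_values"] holds the waveform, batch["labels"] the spectrogram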
| 90
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # im is an image of two cats on a couch, commonly used as a ViT sanity check
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
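# Example invocation (sketch; the script file name and output path are
# placeholders, not taken from this file):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled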
| 48
| 0
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 134
|
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
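# A minimal usage sketch (illustrative values, not from the original script):
# quick_select returns the k-th smallest element in expected linear time.
if __name__ == "__main__":
    demo = [2, 4, 5, 7, 899, 54, 32]
    # sorted order is [2, 4, 5, 7, 32, 54, 899], so the median index yields 7
    print(quick_select(demo, len(demo) // 2))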
| 48
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 229
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
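# Worked example (hedged): solution(10) folds lcm over 1..10, i.e.
# lcm(1, 2) = 2, lcm(2, 3) = 6, lcm(6, 4) = 12, ..., giving 2520 -- the
# smallest number evenly divisible by every integer from 1 to 10.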
| 48
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
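# Example invocation (sketch; the script name, model and data path are
# placeholders): fire maps each CLI flag onto a save_len_file argument.
#   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm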
| 348
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 48
| 0
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
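# Quick sanity check (illustrative): a perimeter of 120 admits exactly three
# right triangles, (30, 40, 50), (20, 48, 52) and (24, 45, 51).
if __name__ == "__main__":
    assert pythagorean_triple(120)[120] == 3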
| 140
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
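# Example invocation (sketch; the script and file names are placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_model.ckpt \
#       --config_file ./t5_config.json \
#       --pytorch_dump_path ./t5-pytorch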
| 48
| 0
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # set_counts[i] is the size of the i-th disjoint set
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True on success."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
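# Usage sketch (illustrative values): three singleton sets merged pairwise;
# max_set tracks the size of the largest set seen so far.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    ds.merge(0, 1)
    ds.merge(1, 2)
    print(ds.max_set)  # -> 3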
| 327
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48
| 0
|
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
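# Usage sketch (illustrative values): the function probes both ends of the
# remaining window on every call, so keys near either end are found quickly.
if __name__ == "__main__":
    print(search([1, 2, 4, 8, 16], 8))   # -> 3
    print(search([1, 2, 4, 8, 16], 99))  # -> -1 (not found)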
| 85
|
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
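# Complexity note with a worst-case sketch: the scan is O(len(s) * len(pattern))
# because every window may be re-checked from scratch, e.g.
#   naive_pattern_search("AAAA", "AAA")  # -> [0, 1], each window compared fully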
| 48
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
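# Example invocation (sketch; the script and file names are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b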
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48
| 0
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Find a path from the top-left to the bottom-right cell, printing it if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step; marks visited cells in `solutions`."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
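# Usage sketch (illustrative grid, not from the original script): 0 marks an
# open cell and 1 a blocked one; the only route hugs the left and bottom edges.
if __name__ == "__main__":
    solve_maze([[0, 1, 0], [0, 1, 0], [0, 0, 0]])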
| 333
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
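# Note on the modelling choice above: XGBRegressor runs with library defaults
# apart from the fixed random_state. In practice one would tune it, e.g.
#   XGBRegressor(n_estimators=500, learning_rate=0.05, max_depth=6)
# (illustrative hyperparameter values, not part of the original script).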
| 48
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def load_image(_):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
'''simple docstring'''
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf(self):
        pass
| 96
|
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the smallest cuboid size M such that more than `limit` cuboids with
    integer-length shortest surface paths fit within an M x M x M bound.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
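# Worked example (hedged): for a cuboid whose longest side is M and whose two
# shorter sides sum to s, the shortest surface path has length sqrt(s**2 + M**2).
# For the classic 6 x 5 x 3 cuboid, sqrt((5 + 3) ** 2 + 6 ** 2) = 10, an
# integer, so it counts toward num_cuboids.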
| 48
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self) -> None:
'''simple docstring'''
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self) -> None:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
    def test_full_tokenizer(self) -> None:
'''simple docstring'''
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self) -> None:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self) -> None:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self) -> None:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self) -> None:
'''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self) -> None:
'''simple docstring'''
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self) -> None:
'''simple docstring'''
A: int = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 319
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A ( ) -> int:
lowerCamelCase : int = argparse.ArgumentParser()
add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCamelCase : int = os.path.join(
"./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
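# Example invocation (illustrative only: the task name, model, and data path
# below are placeholders, not values taken from this file):
#   python run_pl_glue.py --task mrpc --data_dir /path/to/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./mrpc_out --do_train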
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in ascending-power order) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, using one
    multiplication and one addition per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
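    # Illustrative cross-check (not in the original script): both evaluation
    # strategies must agree on any coefficient sequence, up to float rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9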
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
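    # Illustrative extension (not in the original): the estimate should approach
    # the exact integral of x**2 over [0, 1], i.e. 1/3, as the step count grows.
    for n_steps in (10.0, 100.0, 1000.0):
        print(f"steps = {n_steps:g} -> estimate = {method_1([0.0, 1.0], n_steps)}")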
from math import factorial


def solution(n: int = 20) -> int:
    """
    Count the lattice paths through an n x n grid: the central binomial
    coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
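def _cross_check() -> None:
    # Illustrative cross-check (added for this sketch): math.comb computes the
    # same central binomial coefficient with exact integer arithmetic.
    from math import comb

    assert solution(2) == comb(4, 2) == 6
    assert solution(20) == comb(40, 20) == 137846528820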
def solution(num: int = 1000000) -> int:
    """
    Return the starting number below `num` that produces the longest Collatz
    chain, memoising chain lengths in `counters`.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, num):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
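def _chain_length(start: int) -> int:
    # Reference implementation added for illustration: counts the terms of a
    # single Collatz sequence without the memoisation used above.
    length = 1
    while start != 1:
        start = start // 2 if start % 2 == 0 else 3 * start + 1
        length += 1
    return length
# e.g. _chain_length(13) == 10, the classic worked example for this problem.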
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase_ ( snake_case_ : Any ) -> Dict:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
super().__init__()
__lowerCAmelCase = module
__lowerCAmelCase = nn.Sequential(
nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__ ) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__ ) , )
__lowerCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : str , SCREAMING_SNAKE_CASE__ : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) + self.adapter(UpperCamelCase__ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = """bigscience/bloom-1b7"""
# Constant values
_SCREAMING_SNAKE_CASE : int = 2.109659552692574
_SCREAMING_SNAKE_CASE : Union[str, Any] = """Hello my name is"""
_SCREAMING_SNAKE_CASE : Dict = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = 10
def a ( self : Optional[int] ) -> List[str]:
# Models and tokenizer
__lowerCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def a ( self : Tuple ) -> Tuple:
super().setUp()
# Models and tokenizer
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
def a ( self : str ) -> Dict:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase = self.model_abit.config
self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""" ) )
__lowerCAmelCase = config.to_dict()
__lowerCAmelCase = config.to_diff_dict()
__lowerCAmelCase = config.to_json_string()
def a ( self : Tuple ) -> Any:
from bitsandbytes.nn import Paramsabit
__lowerCAmelCase = self.model_fpaa.get_memory_footprint()
__lowerCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowerCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : str ) -> List[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(UpperCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
def a ( self : Dict ) -> Any:
__lowerCAmelCase = BitsAndBytesConfig()
__lowerCAmelCase = True
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__lowerCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
def a ( self : str ) -> str:
with self.assertRaises(UpperCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(UpperCamelCase__ )
def a ( self : Tuple ) -> Any:
__lowerCAmelCase = BitsAndBytesConfig()
with self.assertRaises(UpperCamelCase__ ):
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def a ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises(UpperCamelCase__ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(UpperCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
__lowerCAmelCase = self.model_fpaa.to(torch.floataa )
__lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowerCAmelCase = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
__lowerCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
__lowerCAmelCase = self.model_fpaa.float()
def a ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a ( cls : str ) -> Union[str, Any]:
__lowerCAmelCase = "t5-small"
__lowerCAmelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
__lowerCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
__lowerCAmelCase = "Translate in German: Hello, my dog is cute"
def a ( self : List[str] ) -> List[Any]:
gc.collect()
torch.cuda.empty_cache()
def a ( self : Dict ) -> Optional[int]:
from transformers import TaForConditionalGeneration
__lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowerCAmelCase = None
# test with `t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
# test with `flan-t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
__lowerCAmelCase = modules
def a ( self : str ) -> int:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
# test with `flan-t5-small`
__lowerCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
__lowerCAmelCase = model.generate(**UpperCamelCase__ )
class _lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def a ( self : List[Any] ) -> Optional[Any]:
super().setUp()
# model_name
__lowerCAmelCase = "bigscience/bloom-560m"
__lowerCAmelCase = "t5-small"
# Different types of model
__lowerCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
# Sequence classification model
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
# CausalLM model
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
# Seq2seq model
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""" )
def a ( self : Optional[int] ) -> List[str]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> int:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def a ( self : Optional[int] ) -> List[str]:
super().setUp()
def a ( self : List[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : List[str] ) -> List[str]:
__lowerCAmelCase = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowerCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def a ( self : Tuple ) -> Optional[Any]:
super().setUp()
def a ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowerCAmelCase = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
__lowerCAmelCase = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def a ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = "facebook/opt-350m"
super().setUp()
def a ( self : Optional[int] ) -> Dict:
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowerCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowerCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(UpperCamelCase__ ) ):
__lowerCAmelCase = LoRALayer(module.q_proj , rank=16 )
__lowerCAmelCase = LoRALayer(module.k_proj , rank=16 )
__lowerCAmelCase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowerCAmelCase = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowerCAmelCase = model.forward(**UpperCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(UpperCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = """gpt2-xl"""
_SCREAMING_SNAKE_CASE : int = 3.3191854854152187
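# Minimal usage sketch of the feature exercised by the tests above (assumes a
# CUDA device with the `bitsandbytes` and `accelerate` packages installed; the
# model id is just a small example checkpoint, not one taken from this file):
def _load_model_in_4bit():
    from transformers import AutoModelForCausalLM

    return AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m", load_in_4bit=True, device_map="auto"
    )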
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
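def _regex_demo() -> None:
    # Illustrative spot-check of the two patterns used above: the first fires on
    # mapping declarations, the second extracts the identifier used as sort key.
    assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
    assert _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0] == "albert"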
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
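def _special_token_layout_demo() -> None:
    # Offline illustration of the layouts built above, using placeholder ids
    # (cls=2, sep=3) rather than real vocabulary entries.
    cls, sep = [2], [3]
    seq_a, seq_b = [10, 11], [20]
    assert cls + seq_a + sep == [2, 10, 11, 3]
    assert len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1] == [0, 0, 0, 0, 1, 1]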
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series up to the nth term as a list of term strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
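def _partial_sum(n: int) -> float:
    # Numeric companion added for illustration: the value of the first n terms,
    # which grows like ln(n) plus the Euler-Mascheroni constant (~0.5772).
    return sum(1 / k for k in range(1, n + 1))
# e.g. _partial_sum(4) == 1 + 1/2 + 1/3 + 1/4 ~= 2.0833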
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
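# Usage sketch (illustrative; requires the optional torch + transformers extras,
# and the model id below is an assumption based on the public VQ-Diffusion release):
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]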
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
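def _markdown_format_demo() -> str:
    # Offline illustration of the markdown line format (no network access; the
    # story dict is a stand-in for a real API response).
    story = {"title": "Example", "url": "https://example.com"}
    return "* [{title}]({url})".format(**story)  # -> '* [Example](https://example.com)'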
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model"""
def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Dict = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Union[str, Any] = attention_dropout
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : str = qkv_bias
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = """blip_2_qformer"""
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int:
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : int = hidden_act
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : List[str] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = position_embedding_type
lowerCamelCase : Tuple = cross_attention_frequency
lowerCamelCase : Optional[int] = encoder_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """blip-2"""
lowerCamelCase_ : int = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str:
super().__init__(**UpperCamelCase__ )
if vision_config is None:
lowerCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
lowerCamelCase : List[Any] = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
lowerCamelCase : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ )
lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ )
lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings
lowerCamelCase : int = self.text_config.is_encoder_decoder
lowerCamelCase : Optional[Any] = num_query_tokens
lowerCamelCase : int = self.vision_config.hidden_size
lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase : Dict = 1.0
lowerCamelCase : List[Any] = 0.02
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase : Tuple = self.vision_config.to_dict()
lowerCamelCase : int = self.qformer_config.to_dict()
lowerCamelCase : Optional[Any] = self.text_config.to_dict()
lowerCamelCase : int = self.__class__.model_type
return output
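# Usage sketch for composing the three sub-configs (illustrative; it uses the
# public transformers class names rather than the local aliases above):
#   from transformers import Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )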
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int (the original `/=` made it a float)
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
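def _known_values_demo() -> None:
    # Illustrative spot-checks: Miller-Rabin never rejects a true prime, and a
    # Carmichael number such as 561 (which fools the plain Fermat test) is
    # rejected with overwhelming probability at 1000 rounds.
    assert is_prime_big(97)
    assert not is_prime_big(561)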
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
SCREAMING_SNAKE_CASE_: Optional[int] =[
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : str , snake_case_ : int , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
UpperCAmelCase_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(_SCREAMING_SNAKE_CASE )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ = "weight"
else:
UpperCAmelCase_ = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : List[str]=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = torch.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = WavLMConfigOrig(checkpoint["cfg"] )
UpperCAmelCase_ = WavLMOrig(_SCREAMING_SNAKE_CASE )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
UpperCAmelCase_ = WavLMConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = WavLMConfig()
UpperCAmelCase_ = WavLMModel(_SCREAMING_SNAKE_CASE )
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_wavlm.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Union[str, Any] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
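# Example invocation (illustrative; the checkpoint and output paths below are
# placeholders, not values taken from this file):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base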
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
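def _xlnet_layout_demo() -> None:
    # Offline illustration (placeholder ids, not real vocabulary entries): unlike
    # BERT-style tokenizers, XLNet appends sep and cls at the END of the sequence,
    # matching build_inputs_with_special_tokens above.
    sep, cls = [4], [3]
    seq_a = [10, 11]
    assert seq_a + sep + cls == [10, 11, 4, 3]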
def get_set_bits_count(number: int) -> int:
    """Count the set bits in a non-negative integer."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
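def _kernighan_count(number: int) -> int:
    # Alternative formulation added for illustration: Brian Kernighan's trick
    # clears the lowest set bit each pass, so it loops once per set bit.
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count
# e.g. _kernighan_count(25) == get_set_bits_count(25) == 3  (25 = 0b11001)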
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name: str) -> EfficientNetConfig:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name: str) -> EfficientNetImageProcessor:
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False)
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None,
        pooling=None, classes=1000, classifier_activation="softmax")
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''')
        repo_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
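

# Example invocation (added for illustration; the script name and paths are
# placeholders, not part of the original file):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model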
"""simple docstring"""
import random
class Onepad:
    '''One-time-pad-style cipher: each character is masked with a fresh random key.'''

    @staticmethod
    def encrypt(text):
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher, key):
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
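    # Round-trip sanity check (added for illustration): decryption recovers i
    # from (i + k) * k exactly, so any short message should survive intact.
    assert Onepad().decrypt(*Onepad().encrypt("roundtrip")) == "roundtrip"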
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
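

# Example invocation (added for illustration; the script name is a placeholder
# and the model identifiers are only sample choices):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag_checkpoint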
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
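    # Worked example (added; numbers illustrative): a 100 VA load at power
    # factor 0.8 splits into roughly 80 W real and 60 VAR reactive power.
    assert round(real_power(100, 0.8), 6) == 80.0
    assert round(reactive_power(100, 0.8), 6) == 60.0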
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 20
__lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase__ )
# tweak scores to not be uniform anymore
__lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCamelCase = jax.nn.softmax(UpperCamelCase__ , axis=-1 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create ramp distribution
__lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCamelCase = 5
__lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, length) ).copy()
__lowerCamelCase = top_k_warp_safety_check(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
__lowerCamelCase = np.exp(top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCamelCase = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
# check that min length is applied at length 5
__lowerCamelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCamelCase = 5
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = 15
__lowerCamelCase = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the bos_token_id score
__lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCamelCase = 1
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = 5
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCamelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowerCamelCase = 4
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = 4
__lowerCamelCase = 10
__lowerCamelCase = 15
__lowerCamelCase = 2
__lowerCamelCase = 1
__lowerCamelCase = 15
# dummy input_ids and scores
__lowerCamelCase = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
__lowerCamelCase = input_ids.copy()
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = scores.copy()
# instantiate all dist processors
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = 10
# no processor list
__lowerCamelCase = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# with processor list
__lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCamelCase = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = 4
__lowerCamelCase = 10
__lowerCamelCase = 15
__lowerCamelCase = 2
__lowerCamelCase = 1
__lowerCamelCase = 15
# dummy input_ids and scores
__lowerCamelCase = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
__lowerCamelCase = input_ids.copy()
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = scores.copy()
# instantiate all dist processors
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = 10
# no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
__lowerCamelCase = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
# with processor list
        def run_processor_list(input_ids, scores, cur_len):
__lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCamelCase = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
__lowerCamelCase = jax.jit(UpperCamelCase__ )
__lowerCamelCase = jax.jit(UpperCamelCase__ )
__lowerCamelCase = jitted_run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = jitted_run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    image_size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=image_size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import random
def _partition(data, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
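

# Illustrative use (added): selecting index len(xs) // 2 of an odd-length list
# yields its median; the input order does not matter.
if __name__ == "__main__":
    xs = [7, 1, 5, 3, 9]
    assert quick_select(xs, len(xs) // 2) == 5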
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    '''Return the inverse of a 2x2 or 3x3 matrix using Decimal arithmetic.'''
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0]))
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
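

# Quick numeric check (added; the matrix is chosen for easy mental arithmetic):
# the inverse of [[2, 0], [0, 4]] is [[0.5, 0], [0, 0.25]].
if __name__ == "__main__":
    assert inverse_of_matrix([[2, 0], [0, 4]]) == [[0.5, 0.0], [0.0, 0.25]]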
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
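    # Sanity check (added): lcm(1..10) is 2520, and every i in 1..10 divides it.
    assert solution(10) == 2520
    assert all(solution(10) % i == 0 for i in range(1, 11))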
from __future__ import annotations
def lowerCAmelCase_(matrix: list[list[int]]) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
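    # Worked example (added): in [[1, 3], [2, 4]] the cheapest monotone path
    # is 1 -> 2 -> 4, so the minimum path sum is 7.
    assert lowerCAmelCase_([[1, 3], [2, 4]]) == 7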
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class UpperCamelCase__(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
from math import sqrt
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
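    # Spot check (added): per the Project Euler #86 statement, M = 100 is the
    # first cuboid size admitting more than 2000 distinct integer solutions.
    assert solution(2000) == 100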
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
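

# Example invocation (added for illustration; the script name and paths are
# placeholders, not part of the original file):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_ckpt --config_file ./t5_config.json \
#       --pytorch_dump_path ./t5_pytorch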
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
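
# Cross-check (added): the Pascal's-triangle accumulation above must agree with
# the closed form n! / (r! * (n - r)!); C(10, 5) = 252.
assert binomial_coefficient(n=10, r=5) == 252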
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    '''A callback that registers the events that go through.'''

    def __init__(self) -> None:
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self) -> None:
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self) -> None:
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
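# For reference, a minimal sketch of the pattern exercised by the tests above: a custom
# callback hooking into a training event. `TrainerCallback` is assumed to be importable
# from transformers; printing the logs is an illustrative choice, not part of this suite.
from transformers import TrainerCallback


class PrintLogsCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Called every `logging_steps`; `logs` holds the latest metrics dict.
        if logs is not None:
            print(f"step {state.global_step}: {logs}")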
| 85
|
def naive_pattern_search(s, pattern) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
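    # Additional hand-checked cases (illustrative, not part of the original module):
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]  # overlapping matches are all reported
    assert naive_pattern_search("ABC", "XYZ") == []  # no occurrence yields an empty list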
| 48
| 0
|
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
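# Hand-checked behavior of is_balanced (an illustrative sketch, not part of the original):
#   is_balanced("{[()]}")  -> True   (properly nested pairs)
#   is_balanced("([)]")    -> False  (a closer that doesn't match the top of the stack is rejected)
#   is_balanced("(((")     -> False  (unclosed openers leave the stack non-empty)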
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
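# Behavior sketch of the lazy module set up above (assumes torch is installed): importing
# MMBTConfig resolves immediately from the mapping, while the first access to MMBTModel
# triggers the deferred import of .modeling_mmbt via _LazyModule's __getattr__.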
| 48
| 0
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 49
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
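# A minimal construction sketch for the distributed retriever above. It assumes a running
# Ray cluster; the actor count and the model identifier are illustrative choices only.
#
#     import ray
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", actor_handles=workers, indexed_dataset=None
#     )
#     retriever.init_retrieval()  # loads the index on every worker (or locally if no workers)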
| 49
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
@cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 49
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
__snake_case :Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
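# Example invocation of the script above (the file name and all paths are placeholders):
#
#     python convert_bigbird_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#         --big_bird_config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model \
#         --is_trivia_qa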
| 49
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__snake_case :Optional[Any] = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 49
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        # Copy each (truncated) sequence into the padded output row.
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
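# Worked example for padding_tensor (hand-checked): right-padding two label rows to
# length 4 with fill value -1, as the collator below does for "ner_tags":
#
#     padding_tensor([[1, 2], [3]], -1, "right", 4)
#     -> [[1, 2, -1, -1], [3, -1, -1, -1]]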
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 49
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'{solution() = }')
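# Note on the counting step above: for a fixed outer_width n, a square lamina with a
# hole of width m uses n**2 - m**2 tiles, and n and m must share parity so the border
# has uniform thickness; the line
# `answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1` counts the valid m
# in steps of 2 from hole_width_lower_bound up to outer_width - 2.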
| 49
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])
    result = mst(adjancency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 49
| 1
|
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
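# Illustrative round trip (hand-checked against MORSE_CODE_DICT):
#
#     encrypt("SOS")         -> "... --- ..."
#     decrypt("... --- ...") -> "SOS"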
| 49
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
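# What the checks above encode, in one line: is_safetensors_compatible returns True only
# when every torch .bin weight file ships with a .safetensors counterpart (matched per
# variant such as "fp16"). A hand-reasoned sketch using an illustrative local name:
#
#     filenames = ["unet/diffusion_pytorch_model.bin"]
#     is_safetensors_compatible(filenames)  # -> False: the unet has no safetensors file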
| 49
| 1
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
    def test_fa(x, y):
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
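    # The acceptance rule in simulated_annealing is the standard Metropolis criterion: a
    # worse neighbor is still accepted with probability exp(change / current_temp), so hot
    # early iterations explore freely while cool late ones behave greedily. Hand-checked:
    #
    #     change = -2.0, current_temp = 10.0  -> acceptance probability exp(-0.2) ~ 0.82
    #     change = -2.0, current_temp = 0.5   -> acceptance probability exp(-4.0) ~ 0.018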
| 49
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)  # noqa: E741
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    '''
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''

sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    '''
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__snake_case :int = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''')
else:
__snake_case :Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__snake_case :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__snake_case :Union[str, Any] = support_list[:10]
__snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__snake_case ,__snake_case :Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__snake_case :int = res[1].strip()
if sec_titles == "":
__snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
__snake_case :Optional[int] = sec_titles.split(''' & ''')
__snake_case :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__snake_case :str = find_nearest_training(question)
__snake_case :str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__snake_case :Optional[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__snake_case :Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA (long-form question answering) system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
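# --- Hedged illustration (not executed as part of the app above) ---
# The sidebar text describes the dense retriever as max-inner-product search
# between question and passage embeddings. A minimal numpy sketch of that
# scoring step, with hypothetical embedding arrays (the real app uses a
# trained projection plus a faiss index rather than brute force):
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   passage_embs = rng.normal(size=(1_000, 128))  # hypothetical passage index
#   question_emb = rng.normal(size=(128,))
#   scores = passage_embs @ question_emb          # inner products
#   top10 = np.argsort(-scores)[:10]              # ids of the 10 best passages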
| 49
| 1
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class _A :
def __init__( self : List[str]):
'''simple docstring'''
__a = False
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if not self.initialized:
__a = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.retriever.index.init_index()
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return doc_ids, retrieved_doc_embeds
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
super().__init__(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for worker in self.retrieval_workers
])
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
__a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
else:
__a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
__a = rag_tokenizer.question_encoder
__a = rag_tokenizer.generator
if indexed_dataset is not None:
__a = '''custom'''
__a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE)
else:
__a = cls._build_index(__SCREAMING_SNAKE_CASE)
return cls(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
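# --- Hedged usage sketch ---
# The classes above are obfuscated to _A; in the original transformers RAG
# example they are named RayRetriever and RagRayDistributedRetriever.
# Assuming those names, distributed retrieval is wired up roughly like this:
#
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", retrieval_workers=workers)
#   retriever.init_retrieval()  # loads the index on every worker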
| 49
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = eval_examples
__a = post_process_function
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
__a = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
__a = gen_kwargs
__a = self.eval_dataset if eval_dataset is None else eval_dataset
__a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE)
__a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
else:
__a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE)
return metrics
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = gen_kwargs.copy()
__a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE)
# Temporarily disable metric computation, we will do it in the loop here.
__a = self.compute_metrics
__a = None
__a = time.time()
__a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
__a = compute_metrics
__a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''')
__a = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F'{metric_key_prefix}_'):
__a = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
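# --- Hedged usage sketch ---
# The evaluate/predict overrides above forward generation kwargs to the
# underlying loop. Assuming this is the QA Seq2SeqTrainer subclass from the
# transformers examples, a call might look like:
#
#   metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
#   preds = trainer.predict(test_dataset, test_examples, max_length=128)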
| 49
| 1
|
from __future__ import annotations
from typing import Any
def __snake_case ( _UpperCAmelCase ):
if not postfix_notation:
return 0
__a = {'''+''', '''-''', '''*''', '''/'''}
__a = []
for token in postfix_notation:
if token in operations:
__a , __a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_UpperCAmelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
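# --- Hedged example ---
# Evaluating the postfix expression "5 6 9 * +" (infix: 5 + 6 * 9) with the
# evaluator above (its name is obfuscated to __snake_case here):
#
#   print(__snake_case(["5", "6", "9", "*", "+"]))  # -> 59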
| 49
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=18 , __SCREAMING_SNAKE_CASE : str=30 , __SCREAMING_SNAKE_CASE : List[str]=400 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=True , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size_divisor
__a = do_rescale
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Tuple = GLPNImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = GLPNImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size_divisor'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''resample'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_rescale'''))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
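# --- Hedged illustration ---
# The assertions above encode GLPN's resizing contract: each spatial dim of
# the output is rounded down to a multiple of size_divisor (32 in this
# tester). Quick arithmetic sketch:
#
#   h, w = 401, 257
#   print(h - h % 32, w - w % 32)  # -> 384 256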
| 49
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda x : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population: {total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
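# --- Hedged illustration ---
# Single-point crossover as intended by the helper above (its obfuscated
# variable names collapse the two parents into one), on a toy pair with a
# fixed cut instead of a random one:
#
#   parent_1, parent_2 = "AAAA", "BBBB"
#   cut = 2
#   child_1 = parent_1[:cut] + parent_2[cut:]  # "AABB"
#   child_2 = parent_2[:cut] + parent_1[cut:]  # "BBAA"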
| 49
| 1
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = (EulerDiscreteScheduler,)
UpperCamelCase__ : List[str] = 10
def _lowerCamelCase ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : int):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
scheduler.set_timesteps(self.num_inference_steps)
__a = torch.manual_seed(0)
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(__SCREAMING_SNAKE_CASE)
for i, t in enumerate(scheduler.timesteps):
__a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE)
__a = output.prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 10.08_07) < 1E-2
assert abs(result_mean.item() - 0.01_31) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
scheduler.set_timesteps(self.num_inference_steps)
__a = torch.manual_seed(0)
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(__SCREAMING_SNAKE_CASE)
for i, t in enumerate(scheduler.timesteps):
__a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE)
__a = output.prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 0.00_02) < 1E-2
assert abs(result_mean.item() - 2.2676E-06) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
scheduler.set_timesteps(self.num_inference_steps , device=__SCREAMING_SNAKE_CASE)
__a = torch.manual_seed(0)
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(__SCREAMING_SNAKE_CASE)
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE)
__a = output.prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 10.08_07) < 1E-2
assert abs(result_mean.item() - 0.01_31) < 1E-3
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE , use_karras_sigmas=__SCREAMING_SNAKE_CASE)
scheduler.set_timesteps(self.num_inference_steps , device=__SCREAMING_SNAKE_CASE)
__a = torch.manual_seed(0)
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(__SCREAMING_SNAKE_CASE)
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE)
__a = output.prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63) < 1E-3
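# --- Hedged usage sketch ---
# The denoising loop these tests exercise, in isolation (`model` is a
# hypothetical epsilon-predicting network, not defined here):
#
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       inp = scheduler.scale_model_input(sample, t)
#       noise_pred = model(inp, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample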
| 49
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = LxmertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
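# --- Hedged CLI sketch (script name and paths are placeholders) ---
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin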
| 49
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case :Optional[Any] = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = ['''GLPNFeatureExtractor''']
__snake_case :List[Any] = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__snake_case :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
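# --- Hedged example ---
# The default olid "isbn/0140328726" resolves to Roald Dahl's "Matilda", so
# the summarized dict looks roughly like the following (live Open Library
# data may differ in detail):
#
#   {'Title': 'Matilda', 'Authors': 'Roald Dahl', 'ISBN (10)': '0140328726', ...}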
| 49
| 1
|
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__snake_case :str = logging.get_logger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = ['''input_features''', '''attention_mask''']
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]=80 , __SCREAMING_SNAKE_CASE : List[str]=16_000 , __SCREAMING_SNAKE_CASE : Dict=80 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Tuple=True , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = num_mel_bins
__a = do_ceptral_normalize
__a = normalize_means
__a = normalize_vars
__a = True
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : np.ndarray , ):
'''simple docstring'''
__a = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__a = torch.from_numpy(__SCREAMING_SNAKE_CASE).unsqueeze(0)
__a = ta_kaldi.fbank(__SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def _lowerCamelCase ( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : float = 0.0 , ):
'''simple docstring'''
if normalize_means:
__a = x[:input_length].mean(axis=0)
__a = np.subtract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if normalize_vars:
__a = x[:input_length].std(axis=0)
__a = np.divide(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if input_length < x.shape[0]:
__a = padding_value
# make sure array is in float32
__a = x.astype(np.float32)
return x
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None):
'''simple docstring'''
__a = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
]
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
__a = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
__a = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32) for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray):
__a = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32)
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
__a = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
__a = [raw_speech]
# extract fbank features
__a = [self._extract_fbank_features(__SCREAMING_SNAKE_CASE) for waveform in raw_speech]
# convert into correct format for padding
__a = BatchFeature({'''input_features''': features})
__a = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# make sure list is in array format
__a = padded_inputs.get('''input_features''')
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE):
__a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32) for feature in input_features]
__a = padded_inputs.get('''attention_mask''')
if attention_mask is not None:
__a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.int32) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__a = (
np.array(__SCREAMING_SNAKE_CASE , dtype=np.int32)
if self._get_padding_strategies(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE) is not PaddingStrategy.DO_NOT_PAD
else None
)
__a = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__SCREAMING_SNAKE_CASE)
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE)
return padded_inputs
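# --- Hedged usage sketch ---
# Assuming the class above is the Speech2Text feature extractor (obfuscated
# to _A), a call on 1 s of 16 kHz audio would look like:
#
#   import numpy as np
#   fe = Speech2TextFeatureExtractor()  # assumed de-obfuscated name
#   audio = np.zeros(16_000, dtype=np.float32)
#   batch = fe(audio, sampling_rate=16_000, return_tensors="pt")
#   batch["input_features"].shape  # (1, num_frames, num_mel_bins)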
| 49
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
__a = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
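# --- Hedged usage sketch ---
# This mirrors datasets' TextDatasetReader; assuming that name survives
# de-obfuscation, reading a plain-text file into a Dataset looks like:
#
#   ds = TextDatasetReader("corpus.txt").read()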
| 49
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _A ( __UpperCAmelCase ):
def __get__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict=None):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''')
__a = '''__cached_''' + self.fget.__name__
__a = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if cached is None:
__a = self.fget(__SCREAMING_SNAKE_CASE)
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return cached
def __snake_case ( _UpperCAmelCase ):
__a = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def __snake_case ( _UpperCAmelCase ):
if is_torch_fx_proxy(_UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(_UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_UpperCAmelCase , np.ndarray )
def __snake_case ( _UpperCAmelCase ):
return isinstance(_UpperCAmelCase , np.ndarray )
def __snake_case ( _UpperCAmelCase ):
return _is_numpy(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
import torch
return isinstance(_UpperCAmelCase , torch.Tensor )
def __snake_case ( _UpperCAmelCase ):
return False if not is_torch_available() else _is_torch(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
import torch
return isinstance(_UpperCAmelCase , torch.device )
def __snake_case ( _UpperCAmelCase ):
return False if not is_torch_available() else _is_torch_device(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
import torch
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
__a = getattr(_UpperCAmelCase , _UpperCAmelCase )
else:
return False
return isinstance(_UpperCAmelCase , torch.dtype )
def __snake_case ( _UpperCAmelCase ):
return False if not is_torch_available() else _is_torch_dtype(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
import tensorflow as tf
return isinstance(_UpperCAmelCase , tf.Tensor )
def __snake_case ( _UpperCAmelCase ):
return False if not is_tf_available() else _is_tensorflow(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(_UpperCAmelCase )
return type(_UpperCAmelCase ) == tf.Tensor
def __snake_case ( _UpperCAmelCase ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
import jax.numpy as jnp # noqa: F811
return isinstance(_UpperCAmelCase , jnp.ndarray )
def __snake_case ( _UpperCAmelCase ):
return False if not is_flax_available() else _is_jax(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(_UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return [to_py_obj(_UpperCAmelCase ) for o in obj]
elif is_tf_tensor(_UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_UpperCAmelCase ):
return np.asarray(_UpperCAmelCase ).tolist()
elif isinstance(_UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __snake_case ( _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(_UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return np.array(_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(_UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_UpperCAmelCase ):
return np.asarray(_UpperCAmelCase )
else:
return obj
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = fields(self)
# Safety and consistency checks
if not len(__SCREAMING_SNAKE_CASE):
raise ValueError(F'{self.__class__.__name__} has no fields.')
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.')
__a = getattr(self , class_fields[0].name)
__a = all(getattr(self , field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = first_field.items()
__a = True
else:
try:
__a = iter(__SCREAMING_SNAKE_CASE)
__a = True
except TypeError:
__a = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__SCREAMING_SNAKE_CASE):
if (
not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple))
or not len(__SCREAMING_SNAKE_CASE) == 2
or not isinstance(element[0] , __SCREAMING_SNAKE_CASE)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__a = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).')
break
setattr(self , element[0] , element[1])
if element[1] is not None:
__a = element[1]
elif first_field is not None:
__a = first_field
else:
for field in class_fields:
__a = getattr(self , field.name)
if v is not None:
__a = v
def __delitem__( self : str , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
def _lowerCamelCase ( self : Tuple , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
def _lowerCamelCase ( self : Dict , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
def _lowerCamelCase ( self : str , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.')
def __getitem__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def __setitem__( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return tuple(self[k] for k in self.keys())
class _A ( __UpperCAmelCase ,__UpperCAmelCase ):
@classmethod
def _lowerCamelCase ( cls : int , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[Any] = '''longest'''
UpperCamelCase__ : Dict = '''max_length'''
UpperCamelCase__ : int = '''do_not_pad'''
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''pt'''
UpperCamelCase__ : Tuple = '''tf'''
UpperCamelCase__ : int = '''np'''
UpperCamelCase__ : Union[str, Any] = '''jax'''
class _A :
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[ContextManager]):
'''simple docstring'''
__a = context_managers
__a = ExitStack()
def __enter__( self : Union[str, Any]):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(__SCREAMING_SNAKE_CASE)
def __exit__( self : Dict , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def __snake_case ( _UpperCAmelCase ):
__a = infer_framework(_UpperCAmelCase )
if framework == "tf":
__a = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__a = inspect.signature(model_class.forward ) # PyTorch models
else:
__a = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __snake_case ( _UpperCAmelCase ):
__a = model_class.__name__
__a = infer_framework(_UpperCAmelCase )
if framework == "tf":
__a = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__a = inspect.signature(model_class.forward ) # PyTorch models
else:
__a = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = "" , _UpperCAmelCase = "." ):
def _flatten_dict(_UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase="." ):
for k, v in d.items():
__a = str(_UpperCAmelCase ) + delimiter + str(_UpperCAmelCase ) if parent_key else k
if v and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
yield from flatten_dict(_UpperCAmelCase , _UpperCAmelCase , delimiter=_UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
@contextmanager
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=None ):
if is_numpy_array(_UpperCAmelCase ):
return np.transpose(_UpperCAmelCase , axes=_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.T if axes is None else array.permute(*_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(_UpperCAmelCase , perm=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.transpose(_UpperCAmelCase , axes=_UpperCAmelCase )
else:
raise ValueError(f'Type not supported for transpose: {type(_UpperCAmelCase )}.' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if is_numpy_array(_UpperCAmelCase ):
return np.reshape(_UpperCAmelCase , _UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.reshape(*_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(_UpperCAmelCase , _UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.reshape(_UpperCAmelCase , _UpperCAmelCase )
else:
raise ValueError(f'Type not supported for reshape: {type(_UpperCAmelCase )}.' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=None ):
if is_numpy_array(_UpperCAmelCase ):
return np.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
else:
raise ValueError(f'Type not supported for squeeze: {type(_UpperCAmelCase )}.' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if is_numpy_array(_UpperCAmelCase ):
return np.expand_dims(_UpperCAmelCase , _UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.unsqueeze(dim=_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase )
else:
raise ValueError(f'Type not supported for expand_dims: {type(_UpperCAmelCase )}.' )
def __snake_case ( _UpperCAmelCase ):
if is_numpy_array(_UpperCAmelCase ):
return np.size(_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.size(_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return array.size
else:
        raise ValueError(f'Type not supported for tensor_size: {type(_UpperCAmelCase )}.' )
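if __name__ == "__main__":
    # Hedged smoke test exercising only the numpy branches of the
    # framework-agnostic helpers above (numpy is used as `np` in this module).
    import numpy as np

    _x = np.arange(6).reshape(2, 3)
    assert np.transpose(_x).shape == (3, 2)                           # transpose
    assert np.reshape(_x, (6,)).shape == (6,)                         # reshape
    assert np.squeeze(np.expand_dims(_x, 0), axis=0).shape == (2, 3)  # expand_dims / squeeze
    assert np.size(_x) == 6                                           # tensor_size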
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
for key, value in auto_map.items():
if isinstance(_UpperCAmelCase , (tuple, list) ):
__a = [f'{repo_id}--{v}' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
__a = f'{repo_id}--{value}'
return auto_map
def __snake_case ( _UpperCAmelCase ):
    for base_class in inspect.getmro(_UpperCAmelCase ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # `for ... else`: only reached when no base class matched a framework.
        raise TypeError(f'Could not infer framework from class {_UpperCAmelCase}.' )
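if __name__ == "__main__":
    # Hedged sketch of the MRO walk above: a class is tagged "pt"/"tf"/"flax"
    # from the module prefix of its base classes. `_DemoModule` is hypothetical.
    import torch.nn as nn

    class _DemoModule(nn.Module):
        pass

    assert any(base.__module__.startswith("torch") for base in _DemoModule.__mro__)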
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
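if __name__ == "__main__":
    # Hedged sketch (not part of the test suite): on an existing local file,
    # `cached_path` simply validates and returns the path itself, which is the
    # behaviour the absolute/relative-path tests above rely on.
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _tmp:
        _tmp.write(FILE_CONTENT)
    assert cached_path(_tmp.name) == _tmp.name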
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = '''Salesforce/blip-image-captioning-base'''
UpperCamelCase__ : Optional[int] = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
UpperCamelCase__ : Optional[int] = '''image_captioner'''
UpperCamelCase__ : str = AutoModelForVisionaSeq
UpperCamelCase__ : str = ['''image''']
UpperCamelCase__ : str = ['''text''']
def __init__( self : Any , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : "Image"):
'''simple docstring'''
return self.pre_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return self.model.generate(**__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE)[0].strip()
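if __name__ == "__main__":
    # Hedged usage sketch: instantiating the tool downloads the Salesforce
    # checkpoint named above and needs the `vision` extras, so it is left
    # commented out; the image path below is a placeholder.
    #
    #     tool = _A()
    #     print(tool("path/to/some_image.png"))
    pass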
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
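if __name__ == "__main__":
    # Hedged sketch of the bookkeeping exercised by the custom-timestep tests:
    # for a descending schedule, each step's previous timestep is simply the
    # next list entry, and the final step wraps around to -1.
    _steps = [100, 87, 50, 1, 0]
    _prev = _steps[1:] + [-1]
    assert _prev == [87, 50, 1, 0, -1]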
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__SCREAMING_SNAKE_CASE) as mock_head:
__a = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # Check that the fake head request was indeed called
mock_head.assert_called()
@require_tokenizers
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = GPTaTokenizerFast.from_pretrained('''gpt2''')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__SCREAMING_SNAKE_CASE) as mock_head:
__a = GPTaTokenizerFast.from_pretrained('''gpt2''')
        # Check that the fake head request was indeed called
mock_head.assert_called()
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
try:
__a = tempfile.mktemp()
with open(__SCREAMING_SNAKE_CASE , '''wb''') as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , __SCREAMING_SNAKE_CASE)
__a = AlbertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE)
finally:
os.remove(__SCREAMING_SNAKE_CASE)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json'''):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''') as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , __SCREAMING_SNAKE_CASE)
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''')
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''')
@is_staging_test
class _A ( unittest.TestCase ):
UpperCamelCase__ : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _lowerCamelCase ( cls : List[Any]):
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Optional[Any]):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''')
except HTTPError:
pass
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = BertTokenizer(__SCREAMING_SNAKE_CASE)
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE , repo_id='''test-tokenizer''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = BertTokenizer(__SCREAMING_SNAKE_CASE)
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token)
__a = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def _lowerCamelCase ( self : Any):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = CustomTokenizer(__SCREAMING_SNAKE_CASE)
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token)
__a = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''vocab.txt''')
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__a = BertTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE)
bert_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = CustomTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE)
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token)
__a = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''')
__a = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''')
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = Trie()
trie.add('''Hello 友達''')
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
trie.add('''Hello''')
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}})
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS] This is a extra_id_100'''])
trie.add('''[CLS]''')
trie.add('''extra_id_1''')
trie.add('''extra_id_100''')
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''') , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = Trie()
trie.add('''A''')
self.assertEqual(trie.split('''ABC''') , ['''A''', '''BC'''])
self.assertEqual(trie.split('''BCA''') , ['''BC''', '''A'''])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = Trie()
trie.add('''TOKEN]''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = Trie()
trie.add('''A''')
trie.add('''P''')
trie.add('''[SPECIAL_TOKEN]''')
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''') , ['''This is something ''', '''[SPECIAL_TOKEN]'''])
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = Trie()
trie.add('''AB''')
trie.add('''B''')
trie.add('''C''')
self.assertEqual(trie.split('''ABC''') , ['''AB''', '''C'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
trie.add('''ABC''')
trie.add('''B''')
trie.add('''CD''')
self.assertEqual(trie.split('''ABCD''') , ['''ABC''', '''D'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = Trie()
__a = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3])
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''AB''', '''C'''])
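if __name__ == "__main__":
    # Hedged sketch mirroring the split test above: the Trie performs
    # longest-match splitting on the tokens that were added to it, which is
    # what keeps "[CLS]" and "extra_id_100" intact.
    _t = Trie()
    for _tok in ("[CLS]", "extra_id_1", "extra_id_100"):
        _t.add(_tok)
    assert _t.split("[CLS] This is a extra_id_100") == ["[CLS]", " This is a ", "extra_id_100"]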
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__snake_case :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = {}
__a = {}
__a = {}
# preprocess args
if "points_per_batch" in kwargs:
__a = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
__a = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
__a = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
__a = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
__a = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
__a = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
__a = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
__a = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
__a = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
__a = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
__a = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
__a = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
'''simple docstring'''
__a = load_image(__SCREAMING_SNAKE_CASE)
__a = self.image_processor.size['''longest_edge''']
__a , __a , __a , __a = self.image_processor.generate_crop_boxes(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
__a = self.get_inference_context()
with inference_context():
__a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
__a = image_embeddings
__a = grid_points.shape[1]
__a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                    '''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
'''To return all points at once, set points_per_batch to None''')
for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = grid_points[:, i : i + points_per_batch, :, :]
__a = input_labels[:, i : i + points_per_batch]
__a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
'''simple docstring'''
__a = model_inputs.pop('''input_boxes''')
__a = model_inputs.pop('''is_last''')
__a = model_inputs.pop('''original_sizes''').tolist()
__a = model_inputs.pop('''reshaped_input_sizes''').tolist()
__a = self.model(**__SCREAMING_SNAKE_CASE)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__a = model_outputs['''pred_masks''']
__a = self.image_processor.post_process_masks(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
__a = model_outputs['''iou_scores''']
__a , __a , __a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
'''simple docstring'''
__a = []
__a = []
__a = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores'''))
all_masks.extend(model_output.pop('''masks'''))
all_boxes.append(model_output.pop('''boxes'''))
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a = torch.cat(__SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = defaultdict(__SCREAMING_SNAKE_CASE)
for output in model_outputs:
for k, v in output.items():
extra[k].append(__SCREAMING_SNAKE_CASE)
__a = {}
if output_rle_mask:
__a = rle_mask
if output_bboxes_mask:
__a = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
from math import factorial
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if successes > trials:
raise ValueError('''successes must be lower or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be in range of 1 - 0''' )
__a = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__a = float(factorial(_UpperCAmelCase ) )
coefficient /= factorial(_UpperCAmelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.7_5))
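    # Hedged cross-check (assumes Python >= 3.8 for math.comb): the helper
    # should match the closed form C(n, k) * p**k * (1 - p)**(n - k).
    from math import comb

    assert abs(binomial_distribution(2, 4, 0.75) - comb(4, 2) * 0.75**2 * 0.25**2) < 1e-12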
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
__snake_case :List[str] = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
__snake_case :Optional[int] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : int = ConvBertTokenizer
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , __SCREAMING_SNAKE_CASE : int="[SEP]" , __SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : int="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE) != do_lower_case
or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE) != tokenize_chinese_chars
):
__a = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type'''))
__a = do_lower_case
__a = strip_accents
__a = tokenize_chinese_chars
__a = normalizer_class(**__SCREAMING_SNAKE_CASE)
__a = do_lower_case
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=None):
'''simple docstring'''
__a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
__a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
return tuple(__SCREAMING_SNAKE_CASE)
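if __name__ == "__main__":
    # Hedged sketch of the segment-id layout built in
    # `create_token_type_ids_from_sequences` above: sentence A (plus [CLS] and
    # [SEP]) maps to 0s, sentence B (plus its trailing [SEP]) maps to 1s.
    _ids_a, _ids_b = [11, 12, 13], [21, 22]
    _cls, _sep = [101], [102]
    assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 0, 0, 1, 1, 1]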
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _A ( nn.Module ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int = 16 , __SCREAMING_SNAKE_CASE : int = 88 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "geglu" , __SCREAMING_SNAKE_CASE : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__a = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__a = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__a = [1, 0]
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = hidden_states
__a = []
__a = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
__a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__a = self.transformer_index_for_condition[i]
__a = self.transformers[transformer_index](
__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
__a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__a = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE)
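if __name__ == "__main__":
    # Hedged sketch of the blend at the end of `forward` above: the two
    # transformer outputs (stand-in tensors here) are mixed with `mix_ratio`
    # before the residual input is added back.
    import torch

    _enc_a, _enc_b, _inp = torch.ones(4), torch.zeros(4), torch.full((4,), 0.5)
    _mix = 0.5
    assert torch.allclose(_enc_a * _mix + _enc_b * (1 - _mix) + _inp, torch.ones(4))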
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Any = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
__snake_case :List[Any] = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def __snake_case ( _UpperCAmelCase ):
__a = EfficientNetConfig()
__a = CONFIG_MAP[model_name]['''hidden_dim''']
__a = CONFIG_MAP[model_name]['''width_coef''']
__a = CONFIG_MAP[model_name]['''depth_coef''']
__a = CONFIG_MAP[model_name]['''image_size''']
__a = CONFIG_MAP[model_name]['''dropout_rate''']
__a = CONFIG_MAP[model_name]['''dw_padding''']
__a = '''huggingface/label-files'''
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( ):
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
def __snake_case ( _UpperCAmelCase ):
__a = CONFIG_MAP[model_name]['''image_size''']
__a = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_UpperCAmelCase , )
return preprocessor
def __snake_case ( _UpperCAmelCase ):
__a = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
__a = sorted(set(_UpperCAmelCase ) )
__a = len(_UpperCAmelCase )
__a = {b: str(_UpperCAmelCase ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )}
__a = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    # Build the src -> dst name map; the Keras top classifier layer is named "predictions".
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
return key_mapping
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for key, value in tf_params.items():
if "normalization" in key:
continue
__a = key_mapping[key]
if "_conv" in key and "kernel" in key:
__a = torch.from_numpy(_UpperCAmelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__a = torch.from_numpy(_UpperCAmelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__a = torch.from_numpy(np.transpose(_UpperCAmelCase ) )
else:
__a = torch.from_numpy(_UpperCAmelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_UpperCAmelCase )
@torch.no_grad()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = model_classes[model_name](
include_top=_UpperCAmelCase , weights='''imagenet''' , input_tensor=_UpperCAmelCase , input_shape=_UpperCAmelCase , pooling=_UpperCAmelCase , classes=1000 , classifier_activation='''softmax''' , )
__a = original_model.trainable_variables
__a = original_model.non_trainable_variables
__a = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
__a = list(tf_params.keys() )
# Load HuggingFace model
__a = get_efficientnet_config(_UpperCAmelCase )
__a = EfficientNetForImageClassification(_UpperCAmelCase ).eval()
__a = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
__a = rename_keys(_UpperCAmelCase )
replace_params(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Initialize preprocessor and preprocess input image
__a = convert_image_processor(_UpperCAmelCase )
__a = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
__a = hf_model(**_UpperCAmelCase )
__a = outputs.logits.detach().numpy()
# Original model inference
__a = False
__a = CONFIG_MAP[model_name]['''image_size''']
__a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__a = image.img_to_array(_UpperCAmelCase )
__a = np.expand_dims(_UpperCAmelCase , axis=0 )
__a = original_model.predict(_UpperCAmelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(_UpperCAmelCase ):
os.mkdir(_UpperCAmelCase )
# Save converted model and image processor
hf_model.save_pretrained(_UpperCAmelCase )
preprocessor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
# Push model and image processor to hub
print(f'Pushing converted {model_name} to the hub...' )
__a = f'efficientnet-{model_name}'
preprocessor.push_to_hub(_UpperCAmelCase )
hf_model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
__snake_case :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__snake_case :Optional[int] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
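if __name__ == "__main__":
    # Hedged sketch of the layout change in `replace_params` above: TF conv
    # kernels are stored (H, W, C_in, C_out) while PyTorch expects
    # (C_out, C_in, H, W), hence the (3, 2, 0, 1) permutation.
    _tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)
    _pt_kernel = torch.from_numpy(_tf_kernel).permute(3, 2, 0, 1)
    assert tuple(_pt_kernel.shape) == (32, 16, 3, 3)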
from itertools import permutations
def __snake_case ( _UpperCAmelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__a = [7, 11, 13, 17]
for i, test in enumerate(_UpperCAmelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __snake_case ( _UpperCAmelCase = 10 ):
return sum(
int(''''''.join(map(_UpperCAmelCase , _UpperCAmelCase ) ) )
for num in permutations(range(_UpperCAmelCase ) )
if is_substring_divisible(_UpperCAmelCase ) )
if __name__ == "__main__":
print(f'{solution() = }')
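    # Hedged cross-check on the classic example 1406357289: each three-digit
    # window d2..d4, d3..d5, ... must be divisible by 2, 3, 5, 7, 11, 13, 17.
    _s = "1406357289"
    for _i, _p in enumerate([2, 3, 5, 7, 11, 13, 17]):
        assert int(_s[_i + 1 : _i + 4]) % _p == 0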
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case :Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
__snake_case :Tuple = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
__snake_case :Optional[int] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
__snake_case :int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
from jiwer import compute_measures
import datasets
__snake_case :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Any = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The link between recognition quality and WER has also been studied through the power law, which describes the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__snake_case :Union[str, Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
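
# Quick sanity check for the metric above, assuming `jiwer` is installed; it
# mirrors the iterative branch of `_compute` on the toy pair from the docstring.
if __name__ == "__main__":
    toy_predictions = ["this is the prediction", "there is an other sample"]
    toy_references = ["this is the reference", "there is another one"]
    incorrect = 0
    total = 0
    for prediction, reference in zip(toy_predictions, toy_references):
        measures = compute_measures(reference, prediction)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total)  # 0.5, matching the docstring example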
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but are
    arranged differently (ignoring the case).
    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding dictionary entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
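
# An equivalent formulation of the check above, a sketch using
# collections.Counter with the same lowercase/strip/whitespace normalisation;
# Counter equality performs the same per-character bookkeeping as the loop.
def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    from collections import Counter

    def normalize(s: str) -> str:
        return s.lower().strip().replace(" ", "")

    return Counter(normalize(first_str)) == Counter(normalize(second_str))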
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
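
# Migration sketch: the shim above is a pure rename of MobileViTImageProcessor,
# so callers only need an import swap (the checkpoint name below is illustrative).
if __name__ == "__main__":
    from transformers import MobileViTImageProcessor

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
    print(type(processor).__name__)  # no deprecation warning is emitted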
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
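
# Usage sketch for the config above: the pruning-specific fields sit next to
# the standard BERT hyper-parameters (the values shown are the defaults).
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(config.hidden_size, config.pruning_method)  # 768 topK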
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
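
# Sketch of the ref-file contract checked above: one JSON list per line, one
# line per dataset row (the indices below are made up for illustration; real
# refs are produced by the companion run_chinese_ref.py script).
def _demo_add_chinese_references():
    import tempfile

    toy_dataset = Dataset.from_dict({"input_ids": [[101, 8701, 102], [101, 9021, 102]]})
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write(json.dumps([2]) + "\n")
        f.write(json.dumps([2]) + "\n")
    return add_chinese_references(toy_dataset, f.name)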
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
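
# Standalone sketch of the whole-word-mask collator wired up in main(); the
# checkpoint name is illustrative, and the collator assumes a WordPiece-style
# tokenizer. Masked positions carry real token ids in `labels`; the rest are -100.
def _collator_demo():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
    batch = collator([tokenizer("whole word masking"), tokenizer("masked language modeling")])
    return batch["input_ids"].shape, (batch["labels"] != -100).sum()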
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
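
# Behaviour sketch for copy() above, assuming the field names reconstructed
# here: mutable fields are deep-copied, so mutating the clone leaves the
# source untouched.
if __name__ == "__main__":
    cfg = DownloadConfig(proxies={"https": "http://127.0.0.1:3128"})
    clone = cfg.copy()
    clone.proxies["https"] = "changed"
    print(cfg.proxies["https"])  # still http://127.0.0.1:3128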
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
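
# Simplified sketch of the rule the tests above exercise (this is NOT the
# diffusers implementation): every torch `.bin` weight needs a safetensors
# sibling, where transformer components map pytorch_model -> model in the name.
def naive_safetensors_compatible(filenames, variant=None):
    infix = f".{variant}" if variant is not None else ""
    names = set(filenames)
    for f in filenames:
        if not f.endswith(f"{infix}.bin"):
            continue
        root = f[: -len(f"{infix}.bin")]
        candidates = {
            f"{root}{infix}.safetensors",
            f"{root.replace('pytorch_model', 'model')}{infix}.safetensors",
        }
        if not candidates & names:
            return False
    return True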
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
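
# Worked example of the sparse spacing computed above, using the default sizes:
# 12 encoder layers with 3 sparse layers give a sparse block every 4th layer.
# The exact layer indexing below is illustrative, not the modeling code.
if __name__ == "__main__":
    num_layers, num_sparse_encoder_layers = 12, 3
    encoder_sparse_step = num_layers // num_sparse_encoder_layers if num_sparse_encoder_layers > 0 else num_layers
    sparse_blocks = [i for i in range(1, num_layers + 1) if i % encoder_sparse_step == 0]
    print(encoder_sparse_step, sparse_blocks)  # 4 [4, 8, 12]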
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
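
# One more equivalent phrasing, a readability sketch using set inclusion over
# string.ascii_lowercase; it agrees with is_pangram_fastest on ASCII input.
def is_pangram_set(sentence: str = "The quick brown fox jumps over the lazy dog") -> bool:
    import string

    return set(string.ascii_lowercase) <= set(sentence.lower())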
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
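
# Hypothetical wiring sketch for the distributed retriever above; the model
# name, worker count, and call signature follow the reconstruction here and
# are not a tested recipe.
def _ray_retriever_demo():
    ray.init(ignore_reinit_error=True)
    workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
    retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
    retriever.init_retrieval()
    return retriever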
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''')
def _lowerCamelCase ( self : int):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''')
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
__a = MaskFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = (self.model_tester.min_size,) * 2
__a = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__SCREAMING_SNAKE_CASE),
'''mask_labels''': torch.randn((2, 10, *size) , device=__SCREAMING_SNAKE_CASE),
'''class_labels''': torch.zeros(2 , 10 , device=__SCREAMING_SNAKE_CASE).long(),
}
__a = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(__SCREAMING_SNAKE_CASE)
__a = model(**__SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE).to(__SCREAMING_SNAKE_CASE)
__a = model(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.attentions is not None)
def _lowerCamelCase ( self : str):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__a = self.all_model_classes[1]
__a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.train()
__a = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE).loss
loss.backward()
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.all_model_classes[1]
__a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = True
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.train()
__a = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE)
__a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
__a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(__SCREAMING_SNAKE_CASE)
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
__a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088))
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
__a = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
__a = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
__a = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(__SCREAMING_SNAKE_CASE)
.eval()
)
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
__a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088))
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# masks_queries_logits
__a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__a = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
__a = torch.tensor(__SCREAMING_SNAKE_CASE).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
# class_queries_logits
__a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
__a = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
.to(__SCREAMING_SNAKE_CASE)
.eval()
)
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
__a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088))
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# masks_queries_logits
__a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__a = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
__a = torch.tensor(__SCREAMING_SNAKE_CASE).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
# class_queries_logits
__a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
__a = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(__SCREAMING_SNAKE_CASE)
.eval()
)
__a = self.default_image_processor
__a = image_processor(
[np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
__a = inputs['''pixel_values'''].to(__SCREAMING_SNAKE_CASE)
__a = [el.to(__SCREAMING_SNAKE_CASE) for el in inputs['''mask_labels''']]
__a = [el.to(__SCREAMING_SNAKE_CASE) for el in inputs['''class_labels''']]
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.loss is not None)
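
# End-to-end inference sketch mirroring the integration tests above, using the
# public post-processing API of MaskFormerImageProcessor; a sketch, not part
# of the test suite.
def _maskformer_inference_demo():
    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
    image = prepare_img()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # One class-id map per image, resized back to the original resolution.
    segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    return segmentation.shape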
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
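
# A minimal usage sketch of padding_tensor (illustrative values, not part of the original
# script): right-pad two span lists to a fixed length with a (-1, -1) fill value.
# padding_tensor([[(0, 2), (3, 5)], [(1, 4)]], (-1, -1), "right", 3)
# -> [[[0, 2], [3, 5], [-1, -1]], [[1, 4], [-1, -1], [-1, -1]]]
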
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
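
# Illustrative checks (assumed behavior): is_punctuation(",") and is_punctuation("“") are
# True (ASCII range test and Unicode category "P*" respectively); is_punctuation("A") is False.
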
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start, transitions, steps):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
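
# A minimal usage sketch (illustrative values, not from the original module):
# transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# get_transitions("a", transitions, 5000) would return a Counter visiting "a" roughly
# five times as often as "b", matching the chain's stationary distribution.
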
if __name__ == "__main__":
import doctest
doctest.testmod()
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)  # noqa: E741
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ):
if source == "none":
__a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a = query_qa_dense_index(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__a , __a = query_es_index(
_UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , )
__a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__a = '''question: {} context: {}'''.format(_UpperCAmelCase , _UpperCAmelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---
**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
from __future__ import annotations
import numpy as np
def relu(vector):
    # Element-wise rectified linear unit: max(0, x)
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish notation) expression."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
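
# Worked examples (assumed behavior of the evaluator above):
#   evaluate_postfix(["2", "1", "+", "3", "*"]) -> 9   # (2 + 1) * 3
#   evaluate_postfix(["4", "13", "5", "/", "+"]) -> 6  # 4 + 13 // 5, truncating division
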
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"  #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case :Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__snake_case :List[Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
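# Worked example (hypothetical values, mirroring the evaluate function above):
# fitness is the count of position-wise character matches with the target, so
# evaluating "abX" against target "abc" yields the pair ("abX", 2.0).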
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
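# Minimal sketch of the intended single-point crossover (illustrative,
# hypothetical inputs): cut both parents at one random index and swap tails.
def _demo_crossover(parent_a="AAAAA", parent_b="BBBBB", cut=2):
    child_a = parent_a[:cut] + parent_b[cut:]  # "AABBB"
    child_b = parent_b[:cut] + parent_a[cut:]  # "BBAAA"
    return child_a, child_b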
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda x : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :Optional[Any] = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[int] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__snake_case :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
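# Usage sketch (an assumption about the lazy-import pattern above, not part of
# the original module): with a _LazyModule registered in sys.modules, a symbol
# such as TimesformerConfig is only materialised on first access, e.g.
#   from transformers import TimesformerConfig  # triggers the real import here
#   config = TimesformerConfig()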
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = LxmertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = '''conditional_detr'''
UpperCamelCase__ : List[Any] = ['''past_key_values''']
UpperCamelCase__ : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Optional[int]=300 , __SCREAMING_SNAKE_CASE : Union[str, Any]=6 , __SCREAMING_SNAKE_CASE : Dict=2_048 , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=6 , __SCREAMING_SNAKE_CASE : Optional[int]=2_048 , __SCREAMING_SNAKE_CASE : int=8 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[str]="relu" , __SCREAMING_SNAKE_CASE : Tuple=256 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Any=1.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]="sine" , __SCREAMING_SNAKE_CASE : int="resnet50" , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=5 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : List[Any]=5 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.25 , **__SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = backbone_config.get('''model_type''')
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(__SCREAMING_SNAKE_CASE)
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = cls_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return self.d_model
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : int = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def _lowerCamelCase ( self : str):
'''simple docstring'''
return 1E-5
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return 12
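# Usage sketch (assumes the public class name ConditionalDetrConfig behind the
# obfuscated class above; values are illustrative):
#   from transformers import ConditionalDetrConfig
#   config = ConditionalDetrConfig(num_queries=100, d_model=256)
#   assert config.hidden_size == 256  # "hidden_size" is mapped to "d_model"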
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __snake_case ( _UpperCAmelCase = "isbn/0140328726" ):
__a = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__a = f'{olid} is not a valid Open Library olid'
raise ValueError(_UpperCAmelCase )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def __snake_case ( _UpperCAmelCase ):
__a = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
__a = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__a = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
__a = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = ''', '''.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case :List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
import copy
import random
from transformers import CLIPTokenizer
class _A ( __UpperCAmelCase ):
def __init__( self : int , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = {}
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = super().add_tokens(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
''' `placeholder_token` that is not already in the tokenizer.''')
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=1 , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = []
if num_vec_per_token == 1:
self.try_adding_tokens(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
output.append(__SCREAMING_SNAKE_CASE)
else:
__a = []
for i in range(__SCREAMING_SNAKE_CASE):
__a = placeholder_token + F'_{i}'
self.try_adding_tokens(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
output.append(__SCREAMING_SNAKE_CASE)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
F' {placeholder_token}. Keep placeholder tokens independent.')
__a = output
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=1.0):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = []
for i in range(len(__SCREAMING_SNAKE_CASE)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__SCREAMING_SNAKE_CASE))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__a = self.token_map[placeholder_token]
__a = tokens[: 1 + int(len(__SCREAMING_SNAKE_CASE) * prop_tokens_to_load)]
if vector_shuffle:
__a = copy.copy(__SCREAMING_SNAKE_CASE)
random.shuffle(__SCREAMING_SNAKE_CASE)
__a = text.replace(__SCREAMING_SNAKE_CASE , ''' '''.join(__SCREAMING_SNAKE_CASE))
return text
def __call__( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , *__SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : int=1.0 , **__SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
__SCREAMING_SNAKE_CASE , vector_shuffle=__SCREAMING_SNAKE_CASE , prop_tokens_to_load=__SCREAMING_SNAKE_CASE) , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , *__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=1.0 , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
__SCREAMING_SNAKE_CASE , vector_shuffle=__SCREAMING_SNAKE_CASE , prop_tokens_to_load=__SCREAMING_SNAKE_CASE) , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
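# Usage sketch (hedged: assumes the method names add_placeholder_tokens and
# __call__ from the textual-inversion tokenizer this class is based on, plus an
# illustrative checkpoint name):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<concept>", num_vec_per_token=4)
#   batch = tokenizer("a photo of <concept>", vector_shuffle=True)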
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else {self.split: path_or_paths}
__a = Text(
cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
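# Usage sketch (assumes the public datasets API that this reader backs):
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"})["train"]
#   print(ds[0]["text"])  # first line of corpus.txt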
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _A :
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str]=13 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=24 , __SCREAMING_SNAKE_CASE : Optional[int]=16 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Tuple=5 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=37 , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[Any]=2 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = patch_size
__a = max_length
__a = num_mel_bins
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = frequency_stride
__a = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__a = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__a = (self.max_length - self.patch_size) // self.time_stride + 1
__a = frequency_out_dimension * time_out_dimension
__a = num_patches + 2
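# Worked example with the defaults above (illustrative): frequency_out =
# (16 - 2) // 2 + 1 = 8 and time_out = (24 - 2) // 2 + 1 = 12, so
# num_patches = 96 and seq_length = 98.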
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = self.get_config()
return config, input_values, labels
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = ASTModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Dict = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : int = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase__ : str = False
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : List[str] = False
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = ASTModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''')
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''input_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ASTModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def __snake_case ( ):
__a = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
__a , __a = torchaudio.load(_UpperCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class _A ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
if is_torchaudio_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.default_feature_extractor
__a = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__SCREAMING_SNAKE_CASE)
__a = self.default_feature_extractor
__a , __a = prepare_audio()
__a = audio.squeeze().numpy()
__a = feature_extractor(__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
__snake_case :int = logging.get_logger(__name__)
__snake_case :str = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
__a = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__a = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=_UpperCAmelCase , output_all_encodings=_UpperCAmelCase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , _UpperCAmelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__a = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
__a = os.path.join(get_home_dir() , '''models''' )
__a = _load_vocab(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls=_UpperCAmelCase )
__a = nlp.model.BERTModel(
_UpperCAmelCase , len(_UpperCAmelCase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=_UpperCAmelCase , use_token_type_embed=_UpperCAmelCase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=_UpperCAmelCase , use_decoder=_UpperCAmelCase , )
original_bort.load_parameters(_UpperCAmelCase , cast_dtype=_UpperCAmelCase , ignore_extra=_UpperCAmelCase )
__a = original_bort._collect_params_with_prefix()
# Build our config 🤗
__a = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(_UpperCAmelCase ),
}
__a = BertConfig.from_dict(_UpperCAmelCase )
__a = BertForMaskedLM(_UpperCAmelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_UpperCAmelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_UpperCAmelCase , _UpperCAmelCase ):
__a = hf_param.shape
__a = to_torch(params[gluon_param] )
__a = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
__a = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
__a = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
__a = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
__a = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__a = hf_bort_model.bert.encoder.layer[i]
# self attention
__a = layer.attention.self
__a = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__a = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__a = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__a = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__a = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__a = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__a = layer.attention.output
__a = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
__a = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
__a = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
__a = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__a = layer.intermediate
__a = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__a = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__a = layer.output
__a = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__a = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__a = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__a = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__a = RobertaTokenizer.from_pretrained('''roberta-base''' )
__a = tokenizer.encode_plus(_UpperCAmelCase )['''input_ids''']
# Get gluon output
__a = mx.nd.array([input_ids] )
__a = original_bort(inputs=_UpperCAmelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_UpperCAmelCase )
__a = BertModel.from_pretrained(_UpperCAmelCase )
hf_bort_model.eval()
__a = tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' )
__a = hf_bort_model(**_UpperCAmelCase )[0]
__a = output_gluon[0].asnumpy()
__a = output_hf[0].detach().numpy()
__a = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__a = np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
if success:
print('''✔️ Both models output the same tensors''' )
else:
print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , _UpperCAmelCase )
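# Standalone sketch of the output-parity check used above (runnable; the names
# are illustrative, not from the original script):
def _outputs_match(a, b, atol=1E-3):
    import numpy as np

    max_abs_diff = np.max(np.abs(a - b)).item()  # largest element-wise gap
    return np.allclose(a, b, atol=atol), max_abs_diff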
if __name__ == "__main__":
__snake_case :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Union[str, Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : int , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE)
return config
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0)
__a = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__a = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__a = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''')
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = len(__SCREAMING_SNAKE_CASE)
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0)
for t in reversed(range(__SCREAMING_SNAKE_CASE)):
# 1. predict noise residual
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__a = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
__a = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
if i == len(__SCREAMING_SNAKE_CASE) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
__a = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [100, 87, 50, 1, 0]
__a = len(__SCREAMING_SNAKE_CASE)
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**__SCREAMING_SNAKE_CASE)
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case :Any = logging.getLogger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = '''masked_bert'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1E-12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : List[str]="topK" , __SCREAMING_SNAKE_CASE : List[Any]="constant" , __SCREAMING_SNAKE_CASE : int=0.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
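# Usage sketch (assumes the original class name MaskedBertConfig for the
# obfuscated class above; values are illustrative):
#   config = MaskedBertConfig(vocab_size=30_522, pruning_method="topK")
#   assert config.pruning_method == "topK"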
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation pipeline: prompts a segmentation model (e.g. SAM) with a grid of points over the
    image (and optional crops) and returns the resulting binary masks.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        # Chunk the point grid so the model is prompted with at most points_per_batch points at a time
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask

        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
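# A minimal usage sketch for the pipeline above. The checkpoint and image URL
# are illustrative; any SAM checkpoint supporting mask generation should work,
# and device=0 assumes a GPU (omit it to run on CPU):
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
#   outputs = generator(
#       "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
#   )
#   masks = outputs["masks"]  # list of binary masks, one per detected segment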
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
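# The lazy-import pattern above means the torch-backed modeling module is only
# loaded on first attribute access. A short sketch of the effect (checkpoint
# name microsoft/wavlm-base is a standard WavLM checkpoint, used illustratively):
#
#   from transformers.models.wavlm import WavLMModel  # resolved via the _LazyModule above
#
#   model = WavLMModel.from_pretrained("microsoft/wavlm-base")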
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if the passed-in options differ from the serialized state
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
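# A short usage sketch for the fast tokenizer above; the checkpoint name comes
# from the pretrained maps defined in this file:
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   encoded = tokenizer("ConvBERT replaces some self-attention heads with convolutions.")
#   encoded["input_ids"]  # starts with the [CLS] id, ends with the [SEP] id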
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
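# A minimal sketch of how these two classes fit together. DeiTModel is imported
# here purely for illustration; the config defaults above match deit-base:
#
#   from transformers import DeiTModel
#
#   config = DeiTConfig()            # 12 layers, hidden size 768, 224x224 inputs
#   model = DeiTModel(config)
#   onnx_config = DeiTOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict([("pixel_values", {0: "batch", ...})])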
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into our EfficientNet structure."""
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
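# Example invocation of the script above (the filename is assumed; in the
# transformers repo the conversion scripts live next to the model code):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model
#
# The script fails loudly (assert on np.allclose) if the converted PyTorch
# logits drift from the original TF logits by more than atol=1e-3.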
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
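# For illustration, create_dummy_object("UNet2DModel", '["torch"]') renders the
# class template above roughly as (UNet2DModel is a real diffusers class name,
# chosen here only as an example):
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...
#
# so importing it without torch installed raises a helpful backend error
# instead of an ImportError deep inside the library.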
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
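# Run from the repository root (the script path is assumed here). The exit
# status equals the number of offending files, so any violation fails CI:
#
#   python3 scripts/validate_filenames.py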