code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Decision-Transformer-style model configuration (GPT-2 backbone + RL fields).

    NOTE(review): machine-mangled extraction — every ``__init__`` parameter is
    named ``a_`` (duplicate argument names are a SyntaxError in Python), the
    ``lowerCamelCase_`` assignments discard values that were presumably
    ``self.*`` attribute bindings, and the three class attributes all rebind
    the same name ``__UpperCAmelCase``. Restore original identifiers before use.
    """

    __UpperCAmelCase : int = '''decision_transformer'''  # presumably `model_type`
    __UpperCAmelCase : Any = ['''past_key_values''']  # presumably `keys_to_ignore_at_inference`
    __UpperCAmelCase : Optional[int] = {  # presumably `attribute_map`
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , a_=17 , a_=4 , a_=128 , a_=4096 , a_=True , a_=1 , a_=1024 , a_=3 , a_=1 , a_=None , a_="relu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=1E-5 , a_=0.02 , a_=True , a_=True , a_=5_0256 , a_=5_0256 , a_=False , a_=False , **a_ , ):
        # RL-specific fields (state/action dims, episode length, tanh squashing).
        lowerCamelCase_ : Optional[int] = state_dim
        lowerCamelCase_ : str = act_dim
        lowerCamelCase_ : int = hidden_size
        lowerCamelCase_ : Any = max_ep_len
        lowerCamelCase_ : Optional[Any] = action_tanh
        # GPT-2-like backbone hyper-parameters.
        lowerCamelCase_ : Tuple = vocab_size
        lowerCamelCase_ : List[Any] = n_positions
        lowerCamelCase_ : Optional[Any] = n_layer
        lowerCamelCase_ : Any = n_head
        lowerCamelCase_ : Optional[Any] = n_inner
        lowerCamelCase_ : List[Any] = activation_function
        # Dropout / normalization / initialization settings.
        lowerCamelCase_ : Union[str, Any] = resid_pdrop
        lowerCamelCase_ : Union[str, Any] = embd_pdrop
        lowerCamelCase_ : str = attn_pdrop
        lowerCamelCase_ : Union[str, Any] = layer_norm_epsilon
        lowerCamelCase_ : List[str] = initializer_range
        # Attention scaling / caching flags.
        lowerCamelCase_ : Tuple = scale_attn_weights
        lowerCamelCase_ : Dict = use_cache
        lowerCamelCase_ : str = scale_attn_by_inverse_layer_idx
        lowerCamelCase_ : Dict = reorder_and_upcast_attn
        # Special token ids forwarded to the base config.
        lowerCamelCase_ : str = bos_token_id
        lowerCamelCase_ : Tuple = eos_token_id
        super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
| 707 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
    '''Benchmark the three pangram implementations above with timeit.

    NOTE(review): the setup string imports `is_pangram`, `is_pangram_faster`
    and `is_pangram_fastest`, but in this extracted file all three functions
    were renamed to `__magic_name__` — as written, the timeit calls raise
    NameError inside the benchmarked statement.
    '''
    from timeit import timeit

    lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()" , setup=lowerCamelCase_))
    print(timeit("is_pangram_faster()" , setup=lowerCamelCase_))
    print(timeit("is_pangram_fastest()" , setup=lowerCamelCase_))
    # Reference timings recorded by the original author (seconds):
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `benchmark` is not defined in this extracted file (the
    # function above is named `__magic_name__`), so this raises NameError.
    benchmark()
| 73 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """GPT-J model configuration (PretrainedConfig subclass).

    NOTE(review): machine-mangled extraction — all ``__init__`` parameters
    are named ``a_`` (duplicate argument names are a SyntaxError) and the
    ``lowerCamelCase_`` assignments discard values that were presumably
    ``self.*`` attribute bindings. Restore original identifiers before use.
    """

    __UpperCAmelCase : Optional[Any] = '''gptj'''  # presumably `model_type`
    __UpperCAmelCase : Any = {  # presumably `attribute_map` (rebinds the same name)
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , a_=5_0400 , a_=2048 , a_=4096 , a_=28 , a_=16 , a_=64 , a_=None , a_="gelu_new" , a_=0.0 , a_=0.0 , a_=0.0 , a_=1E-5 , a_=0.02 , a_=True , a_=5_0256 , a_=5_0256 , a_=False , **a_ , ):
        # Backbone hyper-parameters (vocab, context, width, depth, heads, rotary dim).
        lowerCamelCase_ : Dict = vocab_size
        lowerCamelCase_ : Any = n_positions
        lowerCamelCase_ : Tuple = n_embd
        lowerCamelCase_ : List[Any] = n_layer
        lowerCamelCase_ : Tuple = n_head
        lowerCamelCase_ : Optional[Any] = n_inner
        lowerCamelCase_ : Optional[Any] = rotary_dim
        lowerCamelCase_ : Dict = activation_function
        # Dropout / normalization / initialization settings.
        lowerCamelCase_ : List[str] = resid_pdrop
        lowerCamelCase_ : List[str] = embd_pdrop
        lowerCamelCase_ : Tuple = attn_pdrop
        lowerCamelCase_ : Optional[int] = layer_norm_epsilon
        lowerCamelCase_ : Tuple = initializer_range
        lowerCamelCase_ : int = use_cache
        # Special token ids forwarded to the base config.
        lowerCamelCase_ : int = bos_token_id
        lowerCamelCase_ : List[Any] = eos_token_id
        super().__init__(
            bos_token_id=a_ , eos_token_id=a_ , tie_word_embeddings=a_ , **a_ )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """ONNX export configuration (OnnxConfigWithPast style) for the model above.

    NOTE(review): machine-mangled extraction — several method signatures
    repeat the parameter name ``a_`` (a SyntaxError), and ``lowerCamelCase_``
    assignments discard what were presumably named locals (``common_inputs``,
    ``ordered_inputs``, ``batch``, ``seqlen`` …, which later lines still read).
    """

    def __init__( self , a_ , a_ = "default" , a_ = None , a_ = False , ):
        super().__init__(a_ , task=a_ , patching_specs=a_ , use_past=a_ )
        # Fall back to pad_token_id = 0 when the wrapped config has none set.
        if not getattr(self._config , "pad_token_id" , a_ ):
            # TODO: how to do that better?
            lowerCamelCase_ : int = 0

    @property
    def _UpperCamelCase ( self ):
        # presumably `inputs`: dynamic-axis spec for the exported graph.
        lowerCamelCase_ : Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(a_ , direction="inputs" )
            lowerCamelCase_ : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            lowerCamelCase_ : Optional[Any] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def _UpperCamelCase ( self ):
        # presumably `num_layers`
        return self._config.n_layer

    @property
    def _UpperCamelCase ( self ):
        # presumably `num_attention_heads`
        return self._config.n_head

    def _UpperCamelCase ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ):
        # presumably `generate_dummy_inputs`: builds ordered dummy tensors
        # (input_ids, past_key_values, attention_mask) for ONNX tracing.
        lowerCamelCase_ : str = super(a_ , self ).generate_dummy_inputs(
            a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
        # We need to order the input in the way they appears in the forward()
        lowerCamelCase_ : Dict = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                lowerCamelCase_ : Optional[int] = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                lowerCamelCase_ : Tuple = seqlen + 2
                # (batch, heads, past_len, head_dim) — one zero tensor pair per layer.
                lowerCamelCase_ : Union[str, Any] = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                lowerCamelCase_ : List[str] = [
                    (torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers )
                ]
        lowerCamelCase_ : Union[str, Any] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so it also covers the past_key_values length.
            lowerCamelCase_ : Optional[int] = ordered_inputs["attention_mask"].dtype
            lowerCamelCase_ : Any = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
        return ordered_inputs

    @property
    def _UpperCamelCase ( self ):
        # presumably `default_onnx_opset`
        return 13
| 708 |
# Conversion factors from each supported energy unit to joules.
# Both names are bound: the converter function in this file looks the table
# up as ENERGY_CONVERSION, but the extracted dump bound it only to
# __magic_name__ (which later statements rebind), causing a NameError.
ENERGY_CONVERSION = __magic_name__ = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def __magic_name__ ( from_type , to_type , value):
    """Convert `value` between energy units via joules.

    Reconstructed: the extracted signature repeated one parameter name three
    times (duplicate arguments are a SyntaxError); the names used here follow
    the error message and the return expression in the original body.

    Args:
        from_type: key of the source unit in ENERGY_CONVERSION.
        to_type: key of the target unit in ENERGY_CONVERSION.
        value: quantity expressed in `from_type` units.

    Raises:
        ValueError: if either unit name is not a known key.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    # Scale to joules, then divide by the target unit's factor.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __magic_name__ ( repo_id , path , revision = None):
    """Return the resolved Hub URL of a file inside a *dataset* repository.

    Reconstructed: the extracted signature reused one parameter name three
    times (a SyntaxError); names are inferred from the `hfh.hf_hub_url` call
    in the body.

    Args:
        repo_id: dataset repository id on the Hugging Face Hub.
        path: file path inside the repository.
        revision: optional git revision (branch, tag or commit sha).
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision)
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """XLNet-style SentencePiece tokenizer (left-padded, <sep>/<cls> at the end).

    NOTE(review): machine-mangled extraction — ``__init__`` repeats the
    parameter name ``a_`` (a SyntaxError), the four class attributes all
    rebind ``__UpperCAmelCase``, and ``lowerCamelCase_`` assignments discard
    values that were presumably ``self.*`` or named-local bindings which later
    lines still read (e.g. ``vocab``, ``state``, ``outputs``, ``new_pieces``).
    """

    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES  # presumably `vocab_files_names`
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP  # presumably `pretrained_vocab_files_map`
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES  # presumably `max_model_input_sizes`
    __UpperCAmelCase : Optional[int] = '''left'''  # presumably `padding_side`

    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        # presumably `self._pad_token_type_id = 3` plus normalization flags,
        # vocab path, and the loaded SentencePiece model.
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )

    @property
    def _UpperCamelCase ( self ):
        # presumably `vocab_size`
        return len(self.sp_model )

    def _UpperCamelCase ( self ):
        # presumably `get_vocab`: token -> id map, including added tokens.
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state

    def __setstate__( self , a_ ):
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCamelCase ( self , a_ ):
        # presumably `preprocess_text`: whitespace/quote normalization,
        # optional accent stripping and lower-casing.
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs

    def _UpperCamelCase ( self , a_ ):
        # presumably `_tokenize`: SentencePiece encode with special handling
        # for pieces that end in a digit followed by a comma.
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces

    def _UpperCamelCase ( self , a_ ):
        # presumably `_convert_token_to_id`
        return self.sp_model.PieceToId(a_ )

    def _UpperCamelCase ( self , a_ ):
        # presumably `_convert_id_to_token`
        return self.sp_model.IdToPiece(a_ )

    def _UpperCamelCase ( self , a_ ):
        # presumably `convert_tokens_to_string`: join pieces, restore spaces.
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string

    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        # presumably `_decode`: build text separately for added tokens and
        # byte-level tokens, then optionally clean up tokenization spaces.
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # presumably `build_inputs_with_special_tokens`:
        # XLNet format is `A <sep> <cls>` or `A <sep> B <sep> <cls>`.
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        # presumably `get_special_tokens_mask`: 1 marks special tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # presumably `create_token_type_ids_from_sequences`:
        # segment id 2 is reserved for the <cls> token in XLNet.
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # presumably `save_vocabulary`: copy (or serialize) the spiece model
        # into the target directory and return its path.
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for the summarization helpers (truncate_or_pad, process_story,
    build_mask, compute_token_type_ids).

    NOTE(review): machine-mangled extraction — the ``lowerCamelCase_``
    assignments discard what were presumably named locals / ``self.block_size``,
    so several assertions below reference names that are never bound as written.
    """

    def _UpperCamelCase ( self ):
        # presumably `setUp`: block size used by the truncate/pad tests.
        lowerCamelCase_ : Any = 10

    def _UpperCamelCase ( self ):
        # Short sequence is right-padded with zeros up to block_size.
        lowerCamelCase_ : Optional[Any] = [1, 2, 3, 4]
        lowerCamelCase_ : int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(a_ , self.block_size , 0 ) , a_ )

    def _UpperCamelCase ( self ):
        # Exact-length sequence is returned unchanged.
        lowerCamelCase_ : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        lowerCamelCase_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(a_ , self.block_size , 0 ) , a_ )

    def _UpperCamelCase ( self ):
        # Over-length sequence is truncated to block_size.
        lowerCamelCase_ : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        lowerCamelCase_ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(a_ , self.block_size , 0 ) , a_ )

    def _UpperCamelCase ( self ):
        # A story with no @highlight yields an empty summary list.
        lowerCamelCase_ : Tuple = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        lowerCamelCase_ : Optional[Any] = process_story(a_ )
        self.assertEqual(a_ , [] )

    def _UpperCamelCase ( self ):
        # An empty story yields empty story lines and empty summary lines.
        lowerCamelCase_ : List[Any] = ""
        lowerCamelCase_ : Optional[int] = process_story(a_ )
        self.assertEqual(a_ , [] )
        self.assertEqual(a_ , [] )

    def _UpperCamelCase ( self ):
        # @highlight splits the text into story sentences and summary lines.
        lowerCamelCase_ : Optional[Any] = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        lowerCamelCase_ : Tuple = process_story(a_ )
        lowerCamelCase_ : List[Any] = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(a_ , a_ )
        lowerCamelCase_ : Dict = ["It was the best of times."]
        self.assertEqual(a_ , a_ )

    def _UpperCamelCase ( self ):
        # build_mask: no pad token present -> all ones.
        lowerCamelCase_ : Optional[int] = torch.tensor([1, 2, 3, 4] )
        lowerCamelCase_ : Any = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(a_ , 0 ).numpy() , expected.numpy() )

    def _UpperCamelCase ( self ):
        # build_mask: trailing pad tokens (23) are masked out.
        lowerCamelCase_ : List[str] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        lowerCamelCase_ : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(a_ , 23 ).numpy() , expected.numpy() )

    def _UpperCamelCase ( self ):
        # build_mask: pad id 1 masked even when it appears mid-sequence values.
        lowerCamelCase_ : List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        lowerCamelCase_ : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(a_ , 1 ).numpy() , expected.numpy() )

    def _UpperCamelCase ( self ):
        # compute_token_type_ids: segment flips at each separator id (101).
        lowerCamelCase_ : Union[str, Any] = 101
        lowerCamelCase_ : List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        lowerCamelCase_ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        lowerCamelCase_ : Optional[Any] = compute_token_type_ids(a_ , a_ )
        np.testing.assert_array_equal(a_ , a_ )
| 710 |
def __magic_name__ ( lowerCAmelCase_ = 10 , lowerCAmelCase_ = 1000 , lowerCAmelCase_ = True):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return int((number_a + number_a) / 2)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)")
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value")
def answer(lowerCAmelCase_) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowerCamelCase_ : Optional[int] = lower
lowerCamelCase_ : Tuple = higher
lowerCamelCase_ : Union[str, Any] = []
while True:
lowerCamelCase_ : Optional[int] = get_avg(lowerCAmelCase_ , lowerCAmelCase_)
last_numbers.append(lowerCAmelCase_)
if answer(lowerCAmelCase_) == "low":
lowerCamelCase_ : Any = number
elif answer(lowerCAmelCase_) == "high":
lowerCamelCase_ : Optional[int] = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""")
print(F"""details : {last_numbers!s}""")
def __magic_name__ ( ):
    '''Interactive entry point: read bounds and target from stdin, run the game.

    NOTE(review): as extracted, the three locals are bound to
    `lowerCamelCase_` while the call reads `lowerCAmelCase_`, and
    `guess_the_number` does not exist in this file (each function was renamed
    `__magic_name__`) — this raises NameError at runtime.
    '''
    lowerCamelCase_ : Optional[int] = int(input("Enter lower value : ").strip())
    lowerCamelCase_ : List[str] = int(input("Enter high value : ").strip())
    lowerCamelCase_ : List[str] = int(input("Enter value to guess : ").strip())
    guess_the_number(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)


if __name__ == "__main__":
    # NOTE(review): `main` is likewise undefined here (the entry point above
    # is named `__magic_name__`).
    main()
| 73 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__magic_name__ = get_logger(__name__)
__magic_name__ = R'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class lowerCAmelCase__ :
    """Abstract base for Flax logits processors; subclasses implement __call__.

    NOTE(review): `a_` in the decorator is undefined at class-definition time
    in this extracted file (presumably LOGITS_PROCESSOR_INPUTS_DOCSTRING).
    """

    @add_start_docstrings(a_ )
    def __call__( self , a_ , a_ ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCAmelCase__ :
    """Abstract base for Flax logits warpers; subclasses implement __call__.

    NOTE(review): `a_` in the decorator is undefined at class-definition time
    in this extracted file (presumably LOGITS_PROCESSOR_INPUTS_DOCSTRING).
    """

    @add_start_docstrings(a_ )
    def __call__( self , a_ , a_ ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """List of logits processors applied in order; extra kwargs are forwarded
    only to processors whose __call__ declares more than three parameters.

    NOTE(review): mangled — duplicate `a_` parameters are a SyntaxError, and
    `lowerCamelCase_` discards what were presumably `function_args`/`scores`.
    """

    @add_start_docstrings(a_ )
    def __call__( self , a_ , a_ , a_ , **a_ ):
        for processor in self:
            lowerCamelCase_ : int = inspect.signature(processor.__call__ ).parameters
            if len(a_ ) > 3:
                # Validate that every extra declared parameter was supplied.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        F"""{processor.__class__} are passed to the logits processor.""" )
                lowerCamelCase_ : Union[str, Any] = processor(a_ , a_ , a_ , **a_ )
            else:
                lowerCamelCase_ : List[str] = processor(a_ , a_ , a_ )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Logits warper that rescales scores by ``1 / temperature``.

    Reconstructed: the extracted version validated an undefined name, never
    stored the temperature on ``self``, discarded the scaled scores and
    returned the originals unchanged, and its ``__call__`` repeated one
    parameter name (a SyntaxError).
    """

    def __init__( self , temperature ):
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )

        self.temperature = temperature

    def __call__( self , input_ids , scores , cur_len ):
        # Higher temperature flattens the distribution; lower sharpens it.
        return scores / self.temperature
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Top-p (nucleus) logits warper: keeps the smallest token set whose
    cumulative probability exceeds top_p, masking the rest to filter_value.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    `lowerCamelCase_` assignments discard presumable `self.*`/named locals
    (`topk_scores`/`topk_indices`, `mask_scores`, `cumulative_probs`, …).
    """

    def __init__( self , a_ , a_ = -float("Inf" ) , a_ = 1 ):
        if not isinstance(a_ , a_ ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(a_ , a_ ) or (min_tokens_to_keep < 1):
            raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )

        lowerCamelCase_ : Tuple = top_p
        lowerCamelCase_ : Union[str, Any] = filter_value
        lowerCamelCase_ : Any = min_tokens_to_keep

    def __call__( self , a_ , a_ , a_ ):
        # Sort all logits, accumulate probabilities, then mask the tail.
        lowerCamelCase_ : str = lax.top_k(a_ , scores.shape[-1] )
        lowerCamelCase_ : Optional[Any] = jnp.full_like(a_ , self.filter_value )
        lowerCamelCase_ : str = jax.nn.softmax(a_ , axis=-1 ).cumsum(axis=-1 )
        lowerCamelCase_ : List[str] = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        lowerCamelCase_ : Optional[int] = jnp.roll(a_ , 1 )
        score_mask |= score_mask.at[:, 0].set(a_ )
        # min tokens to keep
        lowerCamelCase_ : str = score_mask.at[:, : self.min_tokens_to_keep].set(a_ )
        lowerCamelCase_ : Union[str, Any] = jnp.where(a_ , a_ , a_ )
        # Undo the sort so scores are back in vocabulary order.
        lowerCamelCase_ : Any = jax.lax.sort_key_val(a_ , a_ )[-1]
        return next_scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Top-k logits warper: keeps the k highest-scoring tokens per row and
    masks every other token to filter_value.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    `lowerCamelCase_` assignments discard presumable named locals
    (`batch_size`/`vocab_size`, `topk`, `shift`, `next_scores_flat`, …).
    """

    def __init__( self , a_ , a_ = -float("Inf" ) , a_ = 1 ):
        if not isinstance(a_ , a_ ) or top_k <= 0:
            raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )

        lowerCamelCase_ : Any = max(a_ , a_ )
        lowerCamelCase_ : int = filter_value

    def __call__( self , a_ , a_ , a_ ):
        lowerCamelCase_ : Optional[int] = scores.shape
        # Flat buffer filled with filter_value; top-k entries are scattered in.
        lowerCamelCase_ : str = jnp.full(batch_size * vocab_size , self.filter_value )
        lowerCamelCase_ : int = min(self.top_k , scores.shape[-1] )  # Safety check
        lowerCamelCase_ : List[str] = lax.top_k(a_ , a_ )
        # Per-row offsets so flat indices address the right row.
        lowerCamelCase_ : Tuple = jnp.broadcast_to((jnp.arange(a_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        lowerCamelCase_ : Optional[int] = topk_scores.flatten()
        lowerCamelCase_ : Tuple = topk_indices.flatten() + shift
        lowerCamelCase_ : List[Any] = next_scores_flat.at[topk_indices_flat].set(a_ )
        lowerCamelCase_ : List[Any] = next_scores_flat.reshape(a_ , a_ )
        return next_scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Forces the BOS token to be sampled at generation step 1.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    assignments discard presumable `self.bos_token_id`/`new_scores` bindings.
    """

    def __init__( self , a_ ):
        lowerCamelCase_ : Optional[Any] = bos_token_id

    def __call__( self , a_ , a_ , a_ ):
        # All -inf except the BOS column, applied only when cur_len == 1.
        lowerCamelCase_ : List[str] = jnp.full(scores.shape , -float("inf" ) )
        lowerCamelCase_ : Any = 1 - jnp.bool_(cur_len - 1 )
        lowerCamelCase_ : Optional[int] = jnp.where(a_ , new_scores.at[:, self.bos_token_id].set(0 ) , a_ )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Forces the EOS token at the final generation step (max_length - 1).

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    assignments discard presumable `self.max_length`/`self.eos_token_id`.
    """

    def __init__( self , a_ , a_ ):
        lowerCamelCase_ : List[Any] = max_length
        lowerCamelCase_ : int = eos_token_id

    def __call__( self , a_ , a_ , a_ ):
        # All -inf except the EOS column, applied only at the last position.
        lowerCamelCase_ : List[str] = jnp.full(scores.shape , -float("inf" ) )
        lowerCamelCase_ : List[str] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        lowerCamelCase_ : Union[str, Any] = jnp.where(a_ , new_scores.at[:, self.eos_token_id].set(0 ) , a_ )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Suppresses EOS until at least `min_length` tokens have been generated.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    assignments discard presumable `self.min_length`/`self.eos_token_id`.
    """

    def __init__( self , a_ , a_ ):
        if not isinstance(a_ , a_ ) or min_length < 0:
            raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
        if not isinstance(a_ , a_ ) or eos_token_id < 0:
            raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )

        lowerCamelCase_ : Dict = min_length
        lowerCamelCase_ : Tuple = eos_token_id

    def __call__( self , a_ , a_ , a_ ):
        # create boolean flag to decide if min length penalty should be applied
        lowerCamelCase_ : Any = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        lowerCamelCase_ : Optional[int] = jnp.where(a_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , a_ )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Suppresses a fixed token set while generation is still at `begin_index`.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    assignments discard presumable `self.begin_suppress_tokens`/`self.begin_index`.
    """

    def __init__( self , a_ , a_ ):
        lowerCamelCase_ : int = list(a_ )
        lowerCamelCase_ : List[str] = begin_index

    def __call__( self , a_ , a_ , a_ ):
        lowerCamelCase_ : Tuple = 1 - jnp.bool_(cur_len - self.begin_index )
        lowerCamelCase_ : List[Any] = jnp.where(a_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , a_ )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Unconditionally suppresses a fixed token set at every generation step.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError) and the
    assignments discard presumable `self.suppress_tokens`/`scores` bindings.
    """

    def __init__( self , a_ ):
        lowerCamelCase_ : List[Any] = list(a_ )

    def __call__( self , a_ , a_ , a_ ):
        lowerCamelCase_ : Dict = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Forces specific tokens at specific generation indices, XLA-compatibly.

    NOTE(review): mangled — `__call__` repeats `a_` (a SyntaxError),
    `jnp.intaa` looks like a mangled dtype name (presumably `jnp.int32`),
    and assignments discard presumable `self.force_token_array` bindings.
    """

    def __init__( self , a_ ):
        lowerCamelCase_ : Optional[int] = dict(a_ )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        lowerCamelCase_ : List[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                lowerCamelCase_ : Optional[int] = force_token_array.at[index].set(a_ )
        lowerCamelCase_ : Union[str, Any] = jnp.intaa(a_ )

    def __call__( self , a_ , a_ , a_ ):
        def _force_token(a_ ):
            # Build a score matrix that is -inf everywhere except the forced token.
            lowerCamelCase_ : Tuple = scores.shape[0]
            lowerCamelCase_ : Tuple = self.force_token_array[generation_idx]
            lowerCamelCase_ : Optional[int] = jnp.ones_like(a_ , dtype=scores.dtype ) * -float("inf" )
            lowerCamelCase_ : Any = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            lowerCamelCase_ : int = lax.dynamic_update_slice(a_ , a_ , (0, current_token) )
            return new_scores

        # Only force when cur_len is inside the table and the entry is >= 0.
        lowerCamelCase_ : str = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(a_ ) , lambda: scores , ) , )
        return scores
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Whisper-style timestamp logits processor: enforces alternating
    timestamp/text constraints and prefers timestamps when their total
    probability dominates.

    NOTE(review): mangled — `__init__`/`__call__` repeat `a_` (a SyntaxError)
    and `lowerCamelCase_` assignments discard presumable `self.*`/named locals
    (`last_was_timestamp`, `penultimate_was_timestamp`, `logprobs`, …), so the
    detailed flow below should be read as a reconstruction target, not as
    runnable code.
    """

    def __init__( self , a_ , a_ , a_ ):
        lowerCamelCase_ : List[Any] = generate_config.eos_token_id
        lowerCamelCase_ : Union[str, Any] = generate_config.no_timestamps_token_id
        # Timestamp ids start right after the <|notimestamps|> token.
        lowerCamelCase_ : str = generate_config.no_timestamps_token_id + 1
        lowerCamelCase_ : str = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(a_ , "max_initial_timestamp_index" ):
            lowerCamelCase_ : Tuple = generate_config.max_initial_timestamp_index
        else:
            lowerCamelCase_ : Union[str, Any] = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            lowerCamelCase_ : Optional[int] = model_config.vocab_size

    def __call__( self , a_ , a_ , a_ ):
        # suppress <|notimestamps|> which is handled by without_timestamps
        lowerCamelCase_ : Any = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )

        def handle_pairs(a_ , a_ ):
            # Track whether the last one / two generated tokens were timestamps
            # and constrain the next token accordingly.
            lowerCamelCase_ : Dict = jnp.where((cur_len - self.begin_index) >= 1 , a_ , a_ )
            lowerCamelCase_ : str = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , a_ , )
            lowerCamelCase_ : int = jnp.where((cur_len - self.begin_index) < 2 , a_ , a_ )
            lowerCamelCase_ : Any = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , a_ , a_ , )
            return jnp.where(
                a_ , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , a_ , )

        lowerCamelCase_ : str = jax.vmap(a_ )(a_ , a_ )
        # Cap the first timestamp at begin_index by max_initial_timestamp_index.
        lowerCamelCase_ : Optional[int] = jnp.where(cur_len == self.begin_index , a_ , a_ )
        lowerCamelCase_ : str = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , a_ , )
        lowerCamelCase_ : Optional[int] = self.timestamp_begin + self.max_initial_timestamp_index
        lowerCamelCase_ : List[Any] = jnp.where(
            a_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , a_ , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        lowerCamelCase_ : int = jax.nn.log_softmax(a_ , axis=-1 )

        def handle_cumulative_probs(a_ , a_ ):
            lowerCamelCase_ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            lowerCamelCase_ : str = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , a_ , )

        lowerCamelCase_ : int = jax.vmap(a_ )(a_ , a_ )
        return scores
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for the CvT (Convolutional vision Transformer) model.

    Each list-valued argument holds one entry per stage (CvT uses three
    stages by default). Defaults reproduce the CvT-13 architecture.

    The original (obfuscated) ``__init__`` declared 22 parameters all named
    ``a_`` — a SyntaxError — while the body read names that were never bound.
    The signature below restores the upstream parameter names, which the body
    already depended on, so this is the minimal correct reconstruction.
    """

    # NOTE(review): upstream this attribute is ``model_type = "cvt"``; the
    # obfuscated name is kept for byte-compatibility with the rest of the file.
    # The broken ``List[str]`` annotation was dropped: ``List`` is not imported
    # in this module and evaluating it at class-creation time raises NameError.
    __UpperCAmelCase = '''cvt'''

    def __init__(
        self,
        num_channels=3,                                   # input image channels
        patch_sizes=[7, 3, 3],                            # conv-embedding kernel per stage
        patch_stride=[4, 2, 2],                           # conv-embedding stride per stage
        patch_padding=[2, 1, 1],                          # conv-embedding padding per stage
        embed_dim=[64, 192, 384],                         # hidden size per stage
        num_heads=[1, 3, 6],                              # attention heads per stage
        depth=[1, 2, 10],                                 # transformer blocks per stage
        mlp_ratio=[4.0, 4.0, 4.0],                        # FFN expansion per stage
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],                   # stochastic depth per stage
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],                   # only the last stage uses a CLS token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],  # depthwise conv + batchnorm
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        """Store all hyper-parameters and forward extra kwargs to the base config."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


# Emit a deprecation warning at import time, pointing users at the new
# top-level import path for the re-exported classes above.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 712 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Base import structure: the processor has no optional dependencies.
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}

# Slow tokenizer requires sentencepiece.
# NOTE(review): obfuscation collapsed the original
# ``_import_structure["tokenization_layoutxlm"] = [...]`` updates into plain
# reassignments of ``__magic_name__``; the final ``_LazyModule`` call below
# still reads the original ``_import_structure`` name — confirm upstream.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizer''']

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizerFast''']

if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded by the same
    # availability checks as the runtime structure above.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    # At runtime, replace this module with a lazy loader so heavy
    # dependencies are only imported on first attribute access.
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Fast (dummy-component) checks for the DeepFloyd IF text-to-image pipeline.

    NOTE(review): obfuscation renamed all class attributes to
    ``__UpperCAmelCase`` and all test methods to ``_UpperCamelCase``, so later
    definitions shadow earlier ones; code is kept byte-identical here.
    """

    __UpperCAmelCase : Optional[int] = IFPipeline
    # IF does not accept width/height/latents, so drop them from the shared
    # text-to-image parameter sets.
    __UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    __UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    __UpperCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def _UpperCamelCase ( self ):
        # Dummy components come from the shared IFPipelineTesterMixin helper.
        return self._get_dummy_components()

    def _UpperCamelCase ( self , a_ , a_=0 ):
        """Build deterministic pipeline inputs for the given (device, seed)."""
        if str(a_ ).startswith("mps" ):
            # MPS has no device-bound Generator; fall back to the global seed.
            lowerCamelCase_ : int = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : Any = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : List[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _UpperCamelCase ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def _UpperCamelCase ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def _UpperCamelCase ( self ):
        self._test_save_load_local()

    def _UpperCamelCase ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _UpperCamelCase ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """End-to-end GPU tests for the DeepFloyd IF pipelines (text2img, img2img,
    inpainting), each followed by its stage-II super-resolution pipeline.

    Checks output shapes, peak VRAM usage, and pixel-level closeness to
    reference images. NOTE(review): all test methods are obfuscated to the
    same name ``_UpperCamelCase``; code kept byte-identical.
    """

    def _UpperCamelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _UpperCamelCase ( self ):
        # if
        # Stage I (text -> 64x64) and stage II (64 -> 256) pipelines in fp16.
        lowerCamelCase_ : Optional[Any] = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
        lowerCamelCase_ : Optional[Any] = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=a_ , tokenizer=a_ )
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to("cuda" )
        lowerCamelCase_ : List[Any] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        lowerCamelCase_ : Optional[Any] = None
        lowerCamelCase_ : Tuple = None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        # XFormers/sliced attention is unsupported here; use the added-KV processor.
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(a_ , a_ , a_ , a_ )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        # Reuse the already-loaded components for the img2img variants.
        lowerCamelCase_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
        lowerCamelCase_ : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(a_ , a_ , a_ , a_ )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        lowerCamelCase_ : int = IFInpaintingPipeline(**pipe_a.components )
        lowerCamelCase_ : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(a_ , a_ , a_ , a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        """Text-to-image: stage I (64x64) then stage II super-resolution (256x256)."""
        # pipeline 1
        _start_torch_memory_measurement()
        lowerCamelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase_ : Union[str, Any] = pipe_a(
            prompt_embeds=a_ , negative_prompt_embeds=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
        lowerCamelCase_ : List[str] = output.images[0]
        assert image.shape == (64, 64, 3)
        # Peak VRAM bound for stage I.
        lowerCamelCase_ : Optional[int] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        lowerCamelCase_ : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
        assert_mean_pixel_difference(a_ , a_ )
        # pipeline 2
        _start_torch_memory_measurement()
        lowerCamelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : List[Any] = pipe_a(
            prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
        lowerCamelCase_ : Dict = output.images[0]
        assert image.shape == (256, 256, 3)
        lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        lowerCamelCase_ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(a_ , a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        """Img2img: stage I then stage II, seeded with random input images."""
        # pipeline 1
        _start_torch_memory_measurement()
        lowerCamelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase_ : int = pipe_a(
            prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
        lowerCamelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (64, 64, 3)
        lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        lowerCamelCase_ : Tuple = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
        assert_mean_pixel_difference(a_ , a_ )
        # pipeline 2
        _start_torch_memory_measurement()
        lowerCamelCase_ : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase_ : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : List[Any] = pipe_a(
            prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
        lowerCamelCase_ : Any = output.images[0]
        assert image.shape == (256, 256, 3)
        lowerCamelCase_ : Union[str, Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        lowerCamelCase_ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(a_ , a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        """Inpainting: stage I then stage II, with random image + mask tensors."""
        # pipeline 1
        _start_torch_memory_measurement()
        lowerCamelCase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a_ )
        lowerCamelCase_ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase_ : Any = pipe_a(
            prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
        lowerCamelCase_ : List[Any] = output.images[0]
        assert image.shape == (64, 64, 3)
        lowerCamelCase_ : str = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        lowerCamelCase_ : Optional[int] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
        assert_mean_pixel_difference(a_ , a_ )
        # pipeline 2
        _start_torch_memory_measurement()
        lowerCamelCase_ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ )
        lowerCamelCase_ : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a_ )
        lowerCamelCase_ : Optional[int] = pipe_a(
            prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
        lowerCamelCase_ : Tuple = output.images[0]
        assert image.shape == (256, 256, 3)
        lowerCamelCase_ : Optional[Any] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        lowerCamelCase_ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(a_ , a_ )
def _start_torch_memory_measurement():
    """Reset the CUDA allocator cache and peak-memory statistics.

    Called before each pipeline run so `torch.cuda.max_memory_allocated()`
    afterwards reflects only that run. Bug fix: the obfuscated definition was
    named ``__magic_name__`` while every call site in the test class above
    invokes ``_start_torch_memory_measurement()``, which raised NameError.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


# Backward-compatible alias for the previous (obfuscated) name.
__magic_name__ = _start_torch_memory_measurement
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor bundling an Encodec feature extractor (audio) with a T5
    tokenizer (text), MusicGen-style: text goes to the tokenizer, raw audio to
    the feature extractor.

    NOTE(review): obfuscation collapsed assignment targets to
    ``lowerCamelCase_`` while later lines read the original names
    (``audio``, ``text``, ``inputs``, ``audio_values`` ...); code is kept
    byte-identical.
    """

    # Expected component class names, checked by ProcessorMixin.
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__( self , a_ , a_ ):
        # Positional args: feature_extractor, tokenizer.
        super().__init__(a_ , a_ )
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False

    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        """Forward decoder-prompt-id lookup to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )

    def __call__( self , *a_ , **a_ ):
        """Dispatch `audio` to the feature extractor and `text` to the tokenizer,
        merging both outputs into a single encoding when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )
        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            # Legacy positional calling convention: first arg is the audio.
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio tensors into the tokenizer output.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs

    def _UpperCamelCase ( self , *a_ , **a_ ):
        """Decode generated audio (when `audio` kwarg present) or token ids."""
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Plain single-sequence decode, forwarded to the tokenizer.
        return self.tokenizer.decode(*a_ , **a_ )

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Strip padding from a (batch, channels, seq_len) batch of generated
        audio, returning a list of per-sample arrays."""
        lowerCamelCase_ : Any = to_numpy(a_ )
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
        if padding_mask is None:
            # Without a mask there is nothing to strip.
            return list(a_ )
        lowerCamelCase_ : Tuple = to_numpy(a_ )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            # Keep only the positions the mask marks as real audio.
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
        return audio_values
| 73 | 0 |
def naive_pattern_search(s, pattern):
    """Return every index in ``s`` at which ``pattern`` begins (naive scan).

    Args:
        s: the text to search.
        pattern: the substring to look for.
    Returns:
        A list of 0-based start indices, in increasing order. An empty
        pattern matches at every position (including ``len(s)``).

    Runs in O(len(s) * len(pattern)) time. Bug fix: the obfuscated version
    declared both parameters as ``a_``-style duplicates (a SyntaxError) and
    read names (``s``, ``pattern``, ``position`` ...) that were never bound;
    the signature and locals are reconstructed here.
    """
    pat_len = len(pattern)
    positions = []
    for i in range(len(s) - pat_len + 1):
        # Slice comparison replaces the manual char-by-char inner loop.
        if s[i : i + pat_len] == pattern:
            positions.append(i)
    return positions


if __name__ == "__main__":
    assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
    print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 714 |
def decimal_isolate(number, digit_amount):
    """Isolate the decimal (fractional) part of ``number``.

    Args:
        number: the value whose fractional part is wanted.
        digit_amount: if > 0, round the result to this many decimal places;
            otherwise return the raw fractional part.
    Returns:
        The fractional part, sign-preserving (e.g. -14.123 -> about -0.123).

    Bug fix: the obfuscated version declared both parameters with the same
    name (a SyntaxError) and referenced ``number``/``digit_amount`` which
    were never bound; the real signature is restored (call sites below
    already use ``decimal_isolate``).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 73 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowerCAmelCase__ :
    """Test helper that builds tiny OpenLlama configs/inputs and runs
    shape/consistency checks for each model head.

    NOTE(review): obfuscation collapsed assignment targets to
    ``lowerCamelCase_`` while later lines read the original names
    (``config``, ``input_ids``, ``model`` ...); code kept byte-identical.
    """

    def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
        # Tiny hyper-parameters keep the tests fast on CPU.
        lowerCamelCase_ : int = parent
        lowerCamelCase_ : int = batch_size
        lowerCamelCase_ : Union[str, Any] = seq_length
        lowerCamelCase_ : List[Any] = is_training
        lowerCamelCase_ : str = use_input_mask
        lowerCamelCase_ : str = use_token_type_ids
        lowerCamelCase_ : str = use_labels
        lowerCamelCase_ : Optional[Any] = vocab_size
        lowerCamelCase_ : List[str] = hidden_size
        lowerCamelCase_ : Tuple = num_hidden_layers
        lowerCamelCase_ : str = num_attention_heads
        lowerCamelCase_ : Union[str, Any] = intermediate_size
        lowerCamelCase_ : Any = hidden_act
        lowerCamelCase_ : List[str] = hidden_dropout_prob
        lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : Dict = type_vocab_size
        lowerCamelCase_ : int = type_sequence_label_size
        lowerCamelCase_ : str = initializer_range
        lowerCamelCase_ : Dict = num_labels
        lowerCamelCase_ : Dict = num_choices
        lowerCamelCase_ : int = scope

    def _UpperCamelCase ( self ):
        """Create random ids/masks/labels plus a config for one forward pass."""
        lowerCamelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Any = None
        if self.use_input_mask:
            lowerCamelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : Optional[int] = None
        if self.use_token_type_ids:
            lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase_ : Tuple = None
        lowerCamelCase_ : Optional[int] = None
        lowerCamelCase_ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : Dict = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCamelCase ( self ):
        # Build the tiny config from the tester's hyper-parameters.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , use_stable_embedding=a_ , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
        """Base model: output shape must be (batch, seq, hidden)."""
        lowerCamelCase_ : int = OpenLlamaModel(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : int = model(a_ , attention_mask=a_ )
        lowerCamelCase_ : Tuple = model(a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        """Model as decoder with cross-attention inputs."""
        lowerCamelCase_ : List[Any] = True
        lowerCamelCase_ : str = OpenLlamaModel(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Any = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
        lowerCamelCase_ : str = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
        lowerCamelCase_ : int = model(a_ , attention_mask=a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        """Causal LM head: logits shape must be (batch, seq, vocab)."""
        lowerCamelCase_ : Tuple = OpenLlamaForCausalLM(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Optional[int] = model(a_ , attention_mask=a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        """Decoding with cache: outputs with and without past_key_values must match."""
        lowerCamelCase_ : int = True
        lowerCamelCase_ : Union[str, Any] = True
        lowerCamelCase_ : List[Any] = OpenLlamaForCausalLM(config=a_ )
        model.to(a_ )
        model.eval()
        # first forward pass
        lowerCamelCase_ : Union[str, Any] = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
        lowerCamelCase_ : Union[str, Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase_ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowerCamelCase_ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase_ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
        lowerCamelCase_ : Any = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )["hidden_states"][0]
        lowerCamelCase_ : Union[str, Any] = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )["hidden_states"][0]
        # select random slice
        lowerCamelCase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1E-3 ) )

    def _UpperCamelCase ( self ):
        """Repackage prepare_config_and_inputs() for the common test mixin."""
        lowerCamelCase_ : str = self.prepare_config_and_inputs()
        # NOTE(review): upstream this unpacks seven values; obfuscation
        # collapsed the tuple target to a single name — confirm against
        # the original OpenLlamaModelTester.
        (
            lowerCamelCase_
        ) : Optional[Any] = config_and_inputs
        lowerCamelCase_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Common-mixin test suite for OpenLlama (base model, causal LM and
    sequence-classification heads).

    NOTE(review): obfuscation gave all class attributes and methods the same
    names (``__UpperCAmelCase`` / ``_UpperCamelCase``), so later definitions
    shadow earlier ones; code kept byte-identical.
    """

    # Model classes exercised by the shared ModelTesterMixin machinery.
    __UpperCAmelCase : Union[str, Any] = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    __UpperCAmelCase : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping for the pipeline test mixin.
    __UpperCAmelCase : Tuple = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : Dict = False
    __UpperCAmelCase : str = False

    def _UpperCamelCase ( self ):
        # NOTE(review): `OpenLlamaModelTester` refers to the (renamed) tester
        # class above — confirm the name survives de-obfuscation.
        lowerCamelCase_ : List[Any] = OpenLlamaModelTester(self )
        lowerCamelCase_ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )

    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    def _UpperCamelCase ( self ):
        # Re-run the model check for every positional-embedding variant.
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase_ : Union[str, Any] = type
            self.model_tester.create_and_check_model(*a_ )

    def _UpperCamelCase ( self ):
        """Sequence classification with single-integer labels (regression-style default)."""
        lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Optional[int] = 3
        lowerCamelCase_ : Dict = input_dict["input_ids"]
        lowerCamelCase_ : str = input_ids.ne(1 ).to(a_ )
        lowerCamelCase_ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase_ : Optional[Any] = OpenLlamaForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ , labels=a_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def _UpperCamelCase ( self ):
        """Sequence classification with explicit single-label problem type."""
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : int = 3
        lowerCamelCase_ : List[str] = "single_label_classification"
        lowerCamelCase_ : int = input_dict["input_ids"]
        lowerCamelCase_ : Tuple = input_ids.ne(1 ).to(a_ )
        lowerCamelCase_ : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase_ : List[Any] = OpenLlamaForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def _UpperCamelCase ( self ):
        """Sequence classification with multi-label (float one-hot) targets."""
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Any = 3
        lowerCamelCase_ : List[Any] = "multi_label_classification"
        lowerCamelCase_ : List[str] = input_dict["input_ids"]
        lowerCamelCase_ : List[Any] = input_ids.ne(1 ).to(a_ )
        lowerCamelCase_ : Optional[Any] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase_ : str = OpenLlamaForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def _UpperCamelCase ( self ):
        pass

    @parameterized.expand([("linear",), ("dynamic",)] )
    def _UpperCamelCase ( self , a_ ):
        """RoPE scaling: scaled vs. unscaled models must diverge on long inputs;
        dynamic scaling must match on short inputs, linear must not."""
        lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Dict = ids_tensor([1, 10] , config.vocab_size )
        lowerCamelCase_ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase_ : Union[str, Any] = OpenLlamaModel(a_ )
        original_model.to(a_ )
        original_model.eval()
        lowerCamelCase_ : Union[str, Any] = original_model(a_ ).last_hidden_state
        lowerCamelCase_ : str = original_model(a_ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase_ : str = {"type": scaling_type, "factor": 10.0}
        lowerCamelCase_ : Optional[Any] = OpenLlamaModel(a_ )
        scaled_model.to(a_ )
        scaled_model.eval()
        lowerCamelCase_ : Tuple = scaled_model(a_ ).last_hidden_state
        lowerCamelCase_ : List[Any] = scaled_model(a_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(a_ , a_ , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(a_ , a_ , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(a_ , a_ , atol=1E-5 ) )
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Helper bundling the hyper-parameters used by the image-processor tests.

    Bug fix: the obfuscated ``__init__`` declared eight parameters all named
    ``a_`` (a SyntaxError) while the body read the real names; the upstream
    signature is restored from the assignment targets.
    """

    def __init__(
        self,
        parent,            # owning test case (unused directly, kept for parity)
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Fall back to the default target size when none is supplied.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def _UpperCamelCase ( self ):
        """Return the kwargs dict used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}

    # Alias matching the name used by callers in the test class below
    # (`prepare_image_processor_dict`), lost during obfuscation.
    prepare_image_processor_dict = _UpperCamelCase
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def _UpperCamelCase ( self ):
        # numpy path: same shape checks as the PIL test, feeding np.ndarray inputs.
        # NOTE(review): undefined ``a_`` references and ``lowerCamelCase_`` bindings below
        # are presumably mechanical-rename artifacts; confirm against the upstream test.
        # Initialize image_processing
        lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray )
        # Test not batched input
        lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _UpperCamelCase ( self ):
        # torch path: same shape checks as the PIL test, feeding torch.Tensor inputs.
        # NOTE(review): undefined ``a_`` references and ``lowerCamelCase_`` bindings below
        # are presumably mechanical-rename artifacts; confirm against the upstream test.
        # Initialize image_processing
        lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _UpperCamelCase ( self ):
        # End-to-end OCR check against Tesseract 4.1.1 reference output on a DocVQA fixture,
        # then a shape-only check with OCR disabled.
        # NOTE(review): the undefined ``a_`` references and ``lowerCamelCase_`` bindings below
        # are presumably mechanical-rename artifacts; confirm against the upstream test.
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , a_ )
        self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
        lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder producing flat ``{"content": str}`` examples for tests."""
    def _UpperCamelCase ( self ):
        """Declare a single string feature; this dataset has no supervised keys."""
        # Fixed: ``supervised_keys`` previously referenced the undefined name ``a_``.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _UpperCamelCase ( self , dl_manager , pipeline ):
        """Return a single TRAIN split fed by the module-level dummy examples."""
        # Fixed: the original signature declared the parameter ``a_`` twice (a SyntaxError).
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _UpperCamelCase ( self , pipeline , examples ):
        """Load the given examples into the Beam pipeline."""
        # Fixed: duplicate ``a_`` parameters; ``beam.Create`` receives the examples argument.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
    """Beam-based builder with nested (sequence-of-struct) features for the tests below."""
    def _UpperCamelCase ( self ):
        """Declare the nested ``{"a": Sequence({"b": string})}`` schema; no supervised keys."""
        # Fixed: ``supervised_keys`` previously referenced the undefined name ``a_``.
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _UpperCamelCase ( self , dl_manager , pipeline ):
        """Return a single TRAIN split fed by the module-level nested examples."""
        # Fixed: the original signature declared the parameter ``a_`` twice (a SyntaxError).
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _UpperCamelCase ( self , pipeline , examples ):
        """Load the given examples into the Beam pipeline."""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def __magic_name__ ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def __magic_name__ ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Integration tests for Beam-based dataset builders, run on the local DirectRunner.

    NOTE(review): ``DummyBeamDataset`` / ``NestedBeamDataset`` are referenced under their
    original names while the builder classes above were renamed — confirm the bindings.
    """
    @require_beam
    def _UpperCamelCase ( self ):
        """download_and_prepare on the flat builder writes one Arrow shard plus dataset_info.json."""
        # Fixed: locals below were bound to the throwaway name ``lowerCamelCase_`` while
        # being read as ``expected_num_examples`` / ``builder`` / ``dset`` (NameError).
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def _UpperCamelCase ( self ):
        """Forcing WriteToParquet num_shards=2 must produce both train shard files."""
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                # Delegate to the real writer but pin the shard count to 2.
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                # Fixed: the second existence check must target the second shard (00001),
                # not repeat the 00000 shard as the original did.
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
                dset = builder.as_dataset()
                self.assertEqual(dset["train"].num_rows , expected_num_examples )
                self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
                del dset
    @require_beam
    def _UpperCamelCase ( self ):
        """Without a beam_runner, download_and_prepare must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def _UpperCamelCase ( self ):
        """Same happy path as the flat builder, but with nested (sequence-of-struct) features."""
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for LUKE models: a text encoder augmented with a separate entity
    vocabulary/embedding and optional entity-aware attention.

    NOTE(review): as written, ``__init__`` declares the parameter ``a_`` many times (a
    SyntaxError) and binds its arguments to the throwaway local ``lowerCamelCase_`` instead
    of attributes on ``self`` — presumably mechanical-rename artifacts; confirm against the
    original ``LukeConfig`` before relying on this class.
    """
    # Model-type key used by the AutoConfig registry.
    __UpperCAmelCase : List[Any] = '''luke'''
    def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ):
        # Forward special-token ids and remaining kwargs to the base PretrainedConfig.
        super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
        lowerCamelCase_ : Tuple = vocab_size
        lowerCamelCase_ : Optional[int] = entity_vocab_size
        lowerCamelCase_ : Any = hidden_size
        lowerCamelCase_ : Dict = entity_emb_size
        lowerCamelCase_ : List[Any] = num_hidden_layers
        lowerCamelCase_ : int = num_attention_heads
        lowerCamelCase_ : Union[str, Any] = hidden_act
        lowerCamelCase_ : Tuple = intermediate_size
        lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
        lowerCamelCase_ : Any = attention_probs_dropout_prob
        lowerCamelCase_ : Optional[Any] = max_position_embeddings
        lowerCamelCase_ : str = type_vocab_size
        lowerCamelCase_ : int = initializer_range
        lowerCamelCase_ : List[Any] = layer_norm_eps
        lowerCamelCase_ : Optional[int] = use_entity_aware_attention
        lowerCamelCase_ : str = classifier_dropout
| 73 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
    """Helper that builds small ConvNext configs/inputs and runs shape checks for the tests.

    NOTE(review): as written, ``__init__`` declares the parameter ``a_`` repeatedly (a
    SyntaxError) and results throughout this class are bound to the throwaway local
    ``lowerCamelCase_`` while being read under their intended names (``config``, ``model``,
    ``result`` …) — presumably mechanical-rename artifacts; confirm against the upstream
    ConvNext test module before relying on this class.
    """
    def __init__( self , a_ , a_=13 , a_=32 , a_=3 , a_=4 , a_=[10, 20, 30, 40] , a_=[2, 2, 3, 2] , a_=True , a_=True , a_=37 , a_="gelu" , a_=10 , a_=0.02 , a_=["stage2", "stage3", "stage4"] , a_=[2, 3, 4] , a_=None , ):
        lowerCamelCase_ : Optional[Any] = parent
        lowerCamelCase_ : int = batch_size
        lowerCamelCase_ : List[str] = image_size
        lowerCamelCase_ : Optional[Any] = num_channels
        lowerCamelCase_ : Optional[int] = num_stages
        lowerCamelCase_ : List[Any] = hidden_sizes
        lowerCamelCase_ : Optional[Any] = depths
        lowerCamelCase_ : int = is_training
        lowerCamelCase_ : Any = use_labels
        lowerCamelCase_ : Optional[Any] = intermediate_size
        lowerCamelCase_ : List[str] = hidden_act
        lowerCamelCase_ : List[Any] = num_labels
        lowerCamelCase_ : List[str] = initializer_range
        lowerCamelCase_ : str = out_features
        lowerCamelCase_ : Dict = out_indices
        lowerCamelCase_ : Any = scope
    def _UpperCamelCase ( self ):
        # Random pixel values plus (optionally) random classification labels.
        lowerCamelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ : List[Any] = None
        if self.use_labels:
            lowerCamelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
        lowerCamelCase_ : List[str] = self.get_config()
        return config, pixel_values, labels
    def _UpperCamelCase ( self ):
        # Build a ConvNextConfig from the tester's hyperparameters.
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Base model: checks the (B, C, H/32, W/32) last-hidden-state shape.
        lowerCamelCase_ : int = ConvNextModel(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Optional[Any] = model(a_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Classification head: logits must be (batch_size, num_labels).
        lowerCamelCase_ : List[str] = ConvNextForImageClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : List[Any] = model(a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Backbone: checks feature maps/channels, with and without explicit out_features.
        lowerCamelCase_ : Optional[int] = ConvNextBackbone(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Optional[Any] = model(a_ )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowerCamelCase_ : Tuple = None
        lowerCamelCase_ : List[Any] = ConvNextBackbone(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Optional[int] = model(a_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def _UpperCamelCase ( self ):
        # Convenience wrapper used by the common-test mixins.
        lowerCamelCase_ : Any = self.prepare_config_and_inputs()
        lowerCamelCase_ : Union[str, Any] = config_and_inputs
        lowerCamelCase_ : Union[str, Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Common model/pipeline test suite for ConvNext (base model, classifier, backbone).

    NOTE(review): throughout this class results are bound to the throwaway local
    ``lowerCamelCase_`` while being read under their intended names, and several calls
    reference the undefined name ``a_`` — presumably mechanical-rename artifacts; confirm
    against the upstream ConvNext test module.
    """
    # Model classes exercised by the shared ModelTesterMixin machinery.
    __UpperCAmelCase : List[str] = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the PipelineTesterMixin.
    __UpperCAmelCase : Union[str, Any] = (
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test mixins (pruning, head masking, …).
    __UpperCAmelCase : int = True
    __UpperCAmelCase : List[str] = False
    __UpperCAmelCase : Optional[int] = False
    __UpperCAmelCase : Tuple = False
    __UpperCAmelCase : Optional[Any] = False
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = ConvNextModelTester(self )
        lowerCamelCase_ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
    def _UpperCamelCase ( self ):
        # Run the standard configuration round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _UpperCamelCase ( self ):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip(reason="ConvNext does not support input and output embeddings" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip(reason="ConvNext does not use feedforward chunking" )
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        # Every model's forward signature must start with ``pixel_values``.
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Union[str, Any] = model_class(a_ )
            lowerCamelCase_ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ : Union[str, Any] = [*signature.parameters.keys()]
            lowerCamelCase_ : Any = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*a_ )
    def _UpperCamelCase ( self ):
        # hidden_states must contain num_stages + 1 maps, the first at 1/4 resolution.
        def check_hidden_states_output(a_ , a_ , a_ ):
            lowerCamelCase_ : Optional[Any] = model_class(a_ )
            model.to(a_ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ : Tuple = model(**self._prepare_for_class(a_ , a_ ) )
            lowerCamelCase_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ : int = self.model_tester.num_stages
            self.assertEqual(len(a_ ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : List[str] = True
            check_hidden_states_output(a_ , a_ , a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ : Dict = True
            check_hidden_states_output(a_ , a_ , a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )
    @slow
    def _UpperCamelCase ( self ):
        # Smoke-test loading the first published checkpoint.
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : Any = ConvNextModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
def __magic_name__ ( ):
    """Load the standard COCO cats fixture image used by the slow integration tests."""
    # Fixed: the original bound the opened image to a throwaway local and then returned
    # the undefined name ``image`` (NameError); return the opened image directly.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: logits of facebook/convnext-tiny-224 on the COCO cats image.

    NOTE(review): locals below are bound to ``lowerCamelCase_`` while being read under
    their intended names, and several calls reference the undefined name ``a_`` —
    presumably mechanical-rename artifacts; confirm against the upstream test.
    """
    @cached_property
    def _UpperCamelCase ( self ):
        # Only build the processor when the vision dependencies are installed.
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(a_ )
        lowerCamelCase_ : Optional[int] = self.default_image_processor
        lowerCamelCase_ : Tuple = prepare_img()
        lowerCamelCase_ : List[str] = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ : str = model(**a_ )
        # verify the logits
        lowerCamelCase_ : Tuple = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , a_ )
        lowerCamelCase_ : List[str] = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(a_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase, __lowerCamelCase ):
    """Backbone-mixin test suite for ConvNextBackbone."""
    # Backbone classes / config consumed by the BackboneTesterMixin.
    __UpperCAmelCase : Optional[Any] = (ConvNextBackbone,) if is_torch_available() else ()
    __UpperCAmelCase : Dict = ConvNextConfig
    __UpperCAmelCase : List[Any] = False
    def _UpperCamelCase ( self ):
        # NOTE(review): the tester is bound to a throwaway local here rather than an
        # attribute on ``self`` — presumably a mechanical-rename artifact; confirm.
        lowerCamelCase_ : Any = ConvNextModelTester(self )
| 717 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
    # Imported only for type annotations to avoid a hard pyspark dependency at runtime.
    import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    """BuilderConfig for Spark-backed datasets."""
    # Optional explicit feature schema; when None it is inferred from the DataFrame.
    __UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__ ( df , partition_order , ):
    """Return a generator function yielding ``(key, example)`` pairs from *df*.

    Partitions are visited in *partition_order*; keys are ``"<partition_id>_<row_id>"``.
    Fixed: the original signature declared the same parameter name twice (a SyntaxError)
    and bound intermediates to a throwaway local while reading them under their real names.
    """
    import pyspark
    def generate_fn():
        # Tag every row with its Spark partition id so we can filter per partition below.
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1
    return generate_fn
class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable that walks a Spark DataFrame partition by partition."""
    def __init__( self , df , partition_order=None , ):
        # Fixed: the original signature declared ``a_`` twice (a SyntaxError) and bound the
        # arguments to throwaway locals instead of the attributes read by the other methods.
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
    def __iter__( self ):
        yield from self.generate_examples_fn()
    def _UpperCamelCase ( self , generator ):
        """Return a copy of this iterable with its partitions visited in shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def _UpperCamelCase ( self , worker_id , num_workers ):
        """Return a copy restricted to the partitions assigned to *worker_id*."""
        # Fixed: duplicate ``a_`` parameters (SyntaxError) and undefined local references.
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def _UpperCamelCase ( self ):
        # Number of shards equals the number of partitions to iterate.
        return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
    """DatasetBuilder that materializes a Spark DataFrame into Arrow/Parquet shards."""
    __UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
import pyspark
lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase_ : Optional[Any] = df
lowerCamelCase_ : List[Any] = working_dir
super().__init__(
cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a_ )
lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase_ : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    def _UpperCamelCase ( self ):
        # DatasetInfo carries only the (possibly None) feature schema from the config.
        return datasets.DatasetInfo(features=self.config.features )
    def _UpperCamelCase ( self , a_ ):
        # A Spark DataFrame maps to a single TRAIN split; nothing to download.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
.mapInArrow(a_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
| 73 | 0 |
import numpy as np
def power_iteration(
    input_matrix,
    vector,
    error_tol = 1e-12,
    max_iterations = 100,
):
    """Estimate the dominant eigenpair of ``input_matrix`` with the power method.

    Repeatedly multiplies ``vector`` by the matrix, renormalises, and tracks the
    Rayleigh-quotient eigenvalue estimate until its relative change is below
    ``error_tol`` or ``max_iterations`` is reached.

    Args:
        input_matrix: square numpy array; if complex it must be Hermitian.
        vector: starting vector whose length matches the matrix dimension.
        error_tol: relative eigenvalue change that counts as converged.
        max_iterations: hard iteration cap.

    Returns:
        ``(largest_eigenvalue, corresponding_unit_eigenvector)``
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        # A Hermitian matrix has real eigenvalues; drop the zero imaginary part.
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration():
    """Self-test: compare power_iteration against numpy's eigh on a real and a
    Hermitian complex matrix.

    Fixes: the function was defined under a name that collided with
    power_iteration, and ``np.complexaaa`` (an obfuscated ``np.complex128``)
    raised AttributeError.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])

    # Build a Hermitian complex matrix: real part symmetric, imaginary part
    # antisymmetric (upper triangle +i*, lower triangle -i*).
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest
    # Run module doctests, then the numeric self-test defined above.
    doctest.testmod()
    test_power_iteration()
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax every edge leaving ``v`` for one direction of bidirectional Dijkstra.

    Args:
        graph: adjacency mapping ``node -> [[neighbour, weight], ...]`` for the
            direction currently being expanded.
        v: node just popped from this direction's priority queue.
        visited_forward: nodes already settled in THIS direction (skipped).
        visited_backward: nodes settled by the OPPOSITE search; meeting one may
            improve the end-to-end distance.
        cst_fwd: tentative costs for this direction (mutated in place).
        cst_bwd: costs found by the opposite direction (read only).
        queue: this direction's priority queue (new candidates are pushed).
        parent: predecessor map for this direction (mutated in place).
        shortest_distance: best known end-to-end distance so far.

    Returns:
        The possibly-improved end-to-end shortest distance.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Shortest distance from ``source`` to ``destination`` via bidirectional Dijkstra.

    Runs Dijkstra simultaneously from the source (over ``graph_forward``) and from
    the destination (over ``graph_backward``, the reversed graph), stopping when
    the two frontiers provably meet.

    Returns:
        The shortest distance, 0 when source == destination, or -1 when the
        destination is unreachable.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    def _relax(graph, v, visited_same, visited_other, cst_same, cst_other, queue, parent, best):
        # Local copy of the edge-relaxation step so this function is
        # self-contained: relax v's outgoing edges and update `best` whenever
        # the two searches meet.
        for nxt, d in graph[v]:
            if nxt in visited_same:
                continue
            old_cost = cst_same.get(nxt, np.inf)
            new_cost = cst_same[v] + d
            if new_cost < old_cost:
                queue.put((new_cost, nxt))
                cst_same[nxt] = new_cost
                parent[nxt] = v
            if nxt in visited_other and cst_same[v] + d + cst_other[nxt] < best:
                best = cst_same[v] + d + cst_other[nxt]
        return best

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = _relax(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = _relax(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Termination: once the two settled frontiers together cost at least the
        # best meeting distance found, no shorter path can exist.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency lists of the example graph: graph_fwd[u] = [[v, weight], ...].
# (Previously both dicts were bound to the same name, so the forward graph was
# immediately overwritten by the backward one.)
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Reversed adjacency lists of the same graph, used by the backward search.
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection`` by interpolation.

    Instead of bisecting, the probe ``point`` is placed proportionally to where
    ``item`` falls between the boundary values — O(log log n) on uniformly
    distributed data.

    Returns:
        The index of ``item``, or None when it is absent.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search over ``sorted_collection[left:right + 1]``.

    Fix: the function recursed via the name ``interpolation_search_by_recursion``
    while being defined under a different (obfuscated) name, so every recursive
    call raised NameError.

    Returns:
        The index of ``item`` within the whole collection, or None when absent.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Return True if ``collection`` is ascending sorted, else raise ValueError.

    Renamed to match the call site in the ``__main__`` block below, which
    invokes ``__assert_sorted``.
    """
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    # Set debug = 1 to validate the example collection before searching.
    debug = 0
    # Define the collection unconditionally: previously it only existed inside
    # the `debug == 1` branch, so the default path raised NameError below.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL. (Previously this and
# the logger were bound to the same name, so the logger was clobbered.)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Fixes: the base class referenced an undefined name (``PretrainedConfig`` is
    what L2257's import provides), all ``__init__`` parameters shared one name
    (a SyntaxError), and every hyper-parameter was assigned to a throwaway
    local instead of being stored on ``self``.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names -> CTRL-specific hyper-parameter names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 73 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__magic_name__ = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """Tests for adding, using, serializing and dropping search indexes on a Dataset.

    Fixes: all methods shared the name ``_UpperCamelCase`` (only the last
    survived), lambdas had duplicate argument names (SyntaxError),
    ``np.floataa`` -> ``np.float32``, and (scores, examples) results were not
    tuple-unpacked.
    """

    def _create_dummy_dataset(self):
        # 30 rows named my_name-train_0 ... my_name-train_29.
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    """Unit tests for the FaissIndex wrapper (flat/LSH factories, custom index,
    search, batch search and (de)serialization).

    Fixes: duplicated method names, ``np.floataa`` -> ``np.float32``,
    un-named assertRaises exception types, and missing tuple unpacking of
    (scores, indices) results.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FaissIndex through a mocked fsspec filesystem.

    Fix: the parameter was obfuscated while the body reads ``mockfs``
    (the pytest fixture), which raised NameError.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """Tests for ElasticSearchIndex with a fully mocked Elasticsearch client."""

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)

# Config classes that come with a masked-LM head, and the corresponding
# model-type strings. (Previously all three bindings shared one name, so
# `MODEL_CONFIG_CLASSES` read on the next line was never defined.)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.

    Fixes: every field was declared under the same (obfuscated) name, so the
    dataclass kept only the last one, and the validator was not named
    ``__post_init__`` so it never ran. Field names restored to match the
    attributes read in ``__post_init__`` and the ``ModelArguments`` usage at
    the ``HfArgumentParser`` call site.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when building a config from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Fixes: all fields were declared under one (obfuscated) name, so the
    dataclass kept only the last, and the extension validator was not named
    ``__post_init__``. Field names restored to match the attributes read in
    ``__post_init__`` and the ``DataTrainingArguments`` usage at the
    ``HfArgumentParser`` call site.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Cheap sanity checks on the provided file extensions.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach Chinese whole-word-masking references to a tokenized dataset.

    Args:
        dataset: a ``datasets.Dataset`` with one row per line of ``ref_file``.
        ref_file: path to a JSON-lines file; each non-blank line holds the
            word-boundary references for the matching dataset row.

    Returns:
        A new ``Dataset`` with an extra ``chinese_ref`` column.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    # One reference entry per dataset row is required.
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
lowerCamelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCamelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCamelCase_ : Dict = {}
if data_args.train_file is not None:
lowerCamelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Any = data_args.validation_file
lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
if extension == "txt":
lowerCamelCase_ : List[str] = "text"
lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
lowerCamelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
if model_args.model_name_or_path:
lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
model.resize_token_embeddings(len(lowerCAmelCase_))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
lowerCamelCase_ : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
lowerCamelCase_ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
lowerCamelCase_ : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
lowerCamelCase_ : Dict = model_args.model_name_or_path
else:
lowerCamelCase_ : int = None
lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
lowerCamelCase_ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCamelCase_ : Tuple = trainer.evaluate()
lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
lowerCamelCase_ : Tuple = perplexity
lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 73 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = '''left'''
def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : str = remove_space
lowerCamelCase_ : Tuple = keep_accents
lowerCamelCase_ : Dict = vocab_file
lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _UpperCamelCase ( self ):
return len(self.sp_model )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase_ : Any = self.__dict__.copy()
lowerCamelCase_ : Optional[int] = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : int = {}
lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self , a_ ):
if self.remove_space:
lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ : str = inputs
lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
lowerCamelCase_ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
lowerCamelCase_ : List[str] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ : int = cur_pieces[1:]
else:
lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def _UpperCamelCase ( self , a_ ):
return self.sp_model.PieceToId(a_ )
def _UpperCamelCase ( self , a_ ):
return self.sp_model.IdToPiece(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
lowerCamelCase_ : Union[str, Any] = []
sub_texts.append(a_ )
else:
current_sub_text.append(a_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase_ : Union[str, Any] = "".join(a_ )
lowerCamelCase_ : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
return clean_text
else:
return text
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
lowerCamelCase_ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Any = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
# setable values
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None
__UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _UpperCamelCase ( cls ):
return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
@property
def _UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
pass
def _UpperCamelCase ( self ):
return KarrasVeSchedulerState.create()
def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
lowerCamelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
lowerCamelCase_ : List[str] = sigma + gamma * sigma
lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
raise NotImplementedError()
| 73 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = AudioLDMPipeline
__UpperCAmelCase : Tuple = TEXT_TO_AUDIO_PARAMS
__UpperCAmelCase : Optional[Any] = TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCAmelCase : List[str] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=a_ , )
lowerCamelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ : Dict = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
lowerCamelCase_ : str = ClapTextModelWithProjection(a_ )
lowerCamelCase_ : Any = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
lowerCamelCase_ : Optional[Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=a_ , )
lowerCamelCase_ : str = SpeechTaHifiGan(a_ )
lowerCamelCase_ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : str = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] = AudioLDMPipeline(**a_ )
lowerCamelCase_ : Any = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Optional[int] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[int] = audioldm_pipe(**a_ )
lowerCamelCase_ : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(a_ ) == 256
lowerCamelCase_ : Union[str, Any] = audio[:10]
lowerCamelCase_ : Union[str, Any] = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_dummy_components()
lowerCamelCase_ : Optional[Any] = AudioLDMPipeline(**a_ )
lowerCamelCase_ : List[Any] = audioldm_pipe.to(a_ )
lowerCamelCase_ : Dict = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : List[Any] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
lowerCamelCase_ : Tuple = audioldm_pipe(**a_ )
lowerCamelCase_ : Optional[int] = output.audios[0]
lowerCamelCase_ : List[Any] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
lowerCamelCase_ : Optional[Any] = audioldm_pipe.tokenizer(
a_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=a_ , return_tensors="pt" , )
lowerCamelCase_ : Optional[Any] = text_inputs["input_ids"].to(a_ )
lowerCamelCase_ : Union[str, Any] = audioldm_pipe.text_encoder(
a_ , )
lowerCamelCase_ : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase_ : Optional[Any] = F.normalize(a_ , dim=-1 )
lowerCamelCase_ : Optional[Any] = prompt_embeds
# forward
lowerCamelCase_ : List[Any] = audioldm_pipe(**a_ )
lowerCamelCase_ : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.get_dummy_components()
lowerCamelCase_ : Tuple = AudioLDMPipeline(**a_ )
lowerCamelCase_ : Dict = audioldm_pipe.to(a_ )
lowerCamelCase_ : Any = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Union[str, Any] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Dict = 3 * ["this is a negative prompt"]
lowerCamelCase_ : Optional[int] = negative_prompt
lowerCamelCase_ : List[str] = 3 * [inputs["prompt"]]
# forward
lowerCamelCase_ : Optional[Any] = audioldm_pipe(**a_ )
lowerCamelCase_ : Dict = output.audios[0]
lowerCamelCase_ : Dict = self.get_dummy_inputs(a_ )
lowerCamelCase_ : int = 3 * [inputs.pop("prompt" )]
lowerCamelCase_ : Dict = []
for p in [prompt, negative_prompt]:
lowerCamelCase_ : Dict = audioldm_pipe.tokenizer(
a_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=a_ , return_tensors="pt" , )
lowerCamelCase_ : str = text_inputs["input_ids"].to(a_ )
lowerCamelCase_ : Optional[Any] = audioldm_pipe.text_encoder(
a_ , )
lowerCamelCase_ : int = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase_ : List[str] = F.normalize(a_ , dim=-1 )
embeds.append(a_ )
lowerCamelCase_ : List[str] = embeds
# forward
lowerCamelCase_ : Dict = audioldm_pipe(**a_ )
lowerCamelCase_ : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : int = self.get_dummy_components()
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=a_ )
lowerCamelCase_ : List[Any] = AudioLDMPipeline(**a_ )
lowerCamelCase_ : Union[str, Any] = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Union[str, Any] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : List[str] = "egg cracking"
lowerCamelCase_ : Tuple = audioldm_pipe(**a_ , negative_prompt=a_ )
lowerCamelCase_ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(a_ ) == 256
lowerCamelCase_ : Optional[Any] = audio[:10]
lowerCamelCase_ : List[str] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Optional[Any] = self.get_dummy_components()
lowerCamelCase_ : int = PNDMScheduler(skip_prk_steps=a_ )
lowerCamelCase_ : int = AudioLDMPipeline(**a_ )
lowerCamelCase_ : Dict = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : List[str] = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
lowerCamelCase_ : Any = audioldm_pipe(a_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCamelCase_ : List[Any] = 2
lowerCamelCase_ : Any = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowerCamelCase_ : List[str] = 2
lowerCamelCase_ : Any = audioldm_pipe(a_ , num_inference_steps=2 , num_waveforms_per_prompt=a_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowerCamelCase_ : Any = 2
lowerCamelCase_ : Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=a_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[str] = self.get_dummy_components()
lowerCamelCase_ : Optional[Any] = AudioLDMPipeline(**a_ )
lowerCamelCase_ : List[str] = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
lowerCamelCase_ : Union[str, Any] = self.get_dummy_inputs(a_ )
lowerCamelCase_ : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.0_16 , **a_ )
lowerCamelCase_ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(a_ ) / vocoder_sampling_rate == 0.0_16
lowerCamelCase_ : Dict = audioldm_pipe(audio_length_in_s=0.0_32 , **a_ )
lowerCamelCase_ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(a_ ) / vocoder_sampling_rate == 0.0_32
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_dummy_components()
lowerCamelCase_ : Tuple = AudioLDMPipeline(**a_ )
lowerCamelCase_ : Tuple = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Dict = ["hey"]
lowerCamelCase_ : int = audioldm_pipe(a_ , num_inference_steps=1 )
lowerCamelCase_ : str = output.audios.shape
assert audio_shape == (1, 256)
lowerCamelCase_ : Optional[int] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCamelCase_ : Tuple = SpeechTaHifiGan(a_ ).to(a_ )
lowerCamelCase_ : Optional[int] = audioldm_pipe(a_ , num_inference_steps=1 )
lowerCamelCase_ : Any = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a_ )
def _UpperCamelCase ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=a_ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ )
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self , a_ , a_="cpu" , a_=torch.floataa , a_=0 ):
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Optional[Any] = np.random.RandomState(a_ ).standard_normal((1, 8, 128, 16) )
lowerCamelCase_ : Dict = torch.from_numpy(a_ ).to(device=a_ , dtype=a_ )
lowerCamelCase_ : Optional[Any] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
lowerCamelCase_ : Any = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : Tuple = self.get_inputs(a_ )
lowerCamelCase_ : Dict = 25
lowerCamelCase_ : Optional[Any] = audioldm_pipe(**a_ ).audios[0]
assert audio.ndim == 1
assert len(a_ ) == 8_1920
lowerCamelCase_ : Tuple = audio[7_7230:7_7240]
lowerCamelCase_ : str = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
lowerCamelCase_ : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
lowerCamelCase_ : Tuple = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCamelCase_ : List[str] = audioldm_pipe.to(a_ )
audioldm_pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase_ : int = self.get_inputs(a_ )
lowerCamelCase_ : str = audioldm_pipe(**a_ ).audios[0]
assert audio.ndim == 1
assert len(a_ ) == 8_1920
lowerCamelCase_ : Optional[int] = audio[2_7780:2_7790]
lowerCamelCase_ : Union[str, Any] = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
lowerCamelCase_ : str = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch kernels so test outputs are reproducible across runs.
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Fast tests for ``StableDiffusionDiffEditPipeline`` built from tiny dummy models.

    The attribute names restore the contract expected by the pipeline tester
    mixins — the methods below reference ``self.pipeline_class`` directly, so
    the previous placeholder names (``__UpperCAmelCase``) broke at runtime.
    """

    pipeline_class = StableDiffusionDiffEditPipeline
    # DiffEdit consumes pre-computed latents instead of a raw image input,
    # so swap ``image`` for ``image_latents`` in the shared parameter sets.
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latent_params = frozenset([])
def _UpperCamelCase ( self ):
torch.manual_seed(0 )
lowerCamelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
lowerCamelCase_ : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCamelCase_ : Dict = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Tuple = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : int = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self , a_ , a_=0 ):
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : Union[str, Any] = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def test_save_load_optional_components(self):
    """Optional components set to None must stay None across save_pretrained/from_pretrained,
    and the pipeline output must be unchanged after the round-trip."""
    if not hasattr(self.pipeline_class, "_optional_components"):
        return
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    # `torch_device` comes from the test utilities imported at the top of the file — TODO confirm
    pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)
    # set all optional components to None and update pipeline config accordingly
    for optional_component in pipe._optional_components:
        setattr(pipe, optional_component, None)
    pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
    inputs = self.get_dummy_inputs(torch_device)
    output = pipe(**inputs)[0]
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
    pipe_loaded.to(torch_device)
    pipe_loaded.set_progress_bar_config(disable=None)
    for optional_component in pipe._optional_components:
        self.assertTrue(
            getattr(pipe_loaded, optional_component) is None,
            f"""`{optional_component}` did not stay set to None after loading.""",
        )
    inputs = self.get_dummy_inputs(torch_device)
    output_loaded = pipe_loaded(**inputs)[0]
    max_diff = np.abs(output - output_loaded).max()
    self.assertLess(max_diff, 1e-4)
def test_mask(self):
    """`generate_mask` on CPU returns an all-zero (background) mask of shape (1, 16, 16)."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_mask_inputs(device)
    mask = pipe.generate_mask(**inputs)
    mask_slice = mask[0, -3:, -3:]
    self.assertEqual(mask.shape, (1, 16, 16))
    expected_slice = np.array([0] * 9)
    max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
    self.assertEqual(mask[0, -3, -4], 0)
def test_inversion(self):
    """`pipe.invert` on CPU reproduces the recorded image slice within 1e-3."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inversion_inputs(device)
    image = pipe.invert(**inputs).images
    image_slice = image[0, -1, -3:, -3:]
    self.assertEqual(image.shape, (2, 32, 32, 3))
    expected_slice = np.array(
        [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
    )
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
def test_inference_batch_single_identical(self):
    # Must keep this exact name: it overrides the mixin's test to loosen the
    # tolerance for this pipeline; under a placeholder name the override is lost
    # and `super().test_inference_batch_single_identical` would never be reached
    # as an override.
    super().test_inference_batch_single_identical(expected_max_diff=5e-3)
def test_inversion_dpm(self):
    """Inversion with DPMSolver multistep (forward + inverse) schedulers on CPU.

    The schedulers were previously constructed but never installed into the
    components dict, so the test silently ran with the default scheduler.
    """
    device = "cpu"
    components = self.get_dummy_components()
    scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
    components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
    components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inversion_inputs(device)
    image = pipe.invert(**inputs).images
    image_slice = image[0, -1, -3:, -3:]
    self.assertEqual(image.shape, (2, 32, 32, 3))
    expected_slice = np.array(
        [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
    )
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class lowerCAmelCase__(unittest.TestCase):
    """Slow GPU integration tests for the diffedit pipeline against real checkpoints.

    Fixes: lifecycle hooks restored to their unittest names (`tearDown`,
    `setUpClass`) so they actually run; `cls.raw_image` is now assigned;
    nonexistent `torch.floataa` replaced with `torch.float16`; locals bound
    to the names the calls below consume.
    """

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 73 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-block encoder over token sequences with learned absolute position embeddings.

    Fixes: base classes restored from the undefined `__lowerCamelCase` placeholders
    to the mixins imported above; module attributes (`token_embedder`, `encoders`,
    `layer_norm`, ...) are now actually assigned on `self` so the forward pass can
    find them; the forward method is named `forward` so `nn.Module.__call__` works;
    duplicate `a_` parameters replaced with distinct names.
    """

    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        # positions are a fixed lookup table, not trained — NOTE(review): the
        # original `= False` assignment had lost its target; `requires_grad`
        # is the natural one here — confirm against upstream.
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__(unittest.TestCase):
    """Tests for the backbone out_features/out_indices alignment utilities.

    Fixes: the tuple-unpack targets and `stage_names` local are now bound
    (they were assigned to a placeholder name), and the undefined `a_`
    arguments are restored to the `None`/`ValueError` values the assertions
    below clearly expect.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 73 | 0 |
import numpy as np
import datasets
__magic_name__ = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
__magic_name__ = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
__magic_name__ = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase__(datasets.Metric):
    """Mahalanobis distance of each point in X to a reference distribution.

    Methods restored to the `_info`/`_compute` names `datasets.Metric`
    dispatches to, and locals rebound to the names the math below consumes.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse when the covariance matrix is singular
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__(unittest.TestCase):
    """Smoke tests for `accelerate launch` / `accelerate test`.

    Fixes: class attributes are now bound to the names the code reads
    (L-level reference to `mod_file` previously hit an unbound placeholder),
    the config rename hooks use their unittest names so they actually run,
    and the launch test copies `base_cmd` instead of mutating the shared
    class-level list in place.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # stash any user config so launches fall back to defaults
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        # copy so `+=` does not mutate the shared class attribute
        cmd = list(self.base_cmd)
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class lowerCAmelCase__(unittest.TestCase):
    """Tests for the `accelerate tpu-config` CLI: each case runs the command in
    `--debug` mode and asserts the gcloud invocation it would have issued.

    Fixes: class attributes bound to the names the assertions read, each
    `run_command` result bound to `output`, and `return_stdout=True` restored
    (it was an undefined placeholder).
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )
| 73 | 0 |
import unittest
from transformers import DonutProcessor
# Checkpoint constant restored to a referencable name (the processor load
# below previously referenced an undefined placeholder).
CHECKPOINT = "naver-clova-ix/donut-base"


class lowerCAmelCase__(unittest.TestCase):
    """Tests for `DonutProcessor.token2json` structured-sequence decoding.

    Fixes: `setUp` hook name restored so the processor is actually created,
    the garbled `tokenajson` attribute corrected to `token2json`, and locals
    bound to the names the assertion uses.
    """

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: denoise random latents with a UNet
    under a DDIM-style scheduler, then decode them with a VQ-VAE.

    Fixes: base class restored from the undefined `__lowerCamelCase` placeholder
    to the imported `DiffusionPipeline`, duplicate `a_` parameters given their
    real names, and every intermediate rebound to the names the next statements
    consume.
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 73 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the "platform" string previously had no target; per the JAX
    # docs referenced above it belongs in XLA_PYTHON_CLIENT_ALLOCATOR.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds tiny Pegasus configs/inputs and checks cached vs uncached decoding.

    Fixes: class renamed to `FlaxPegasusModelTester` (the name the test class
    below instantiates), class attributes bound to `config_cls`/`config_updates`,
    duplicate `a_` parameters replaced with real names, and every local rebound
    to the name the following statements read.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # leave room for the appended EOS column, and avoid special ids via clip(3, ...)
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached single-step decoding must match the uncached full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask_cache = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(
            decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(
            decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Assemble the model-input dict, deriving padding masks from `config.pad_token_id`.

    Renamed from the garbled placeholder to the name the tester above calls,
    with distinct parameter names; `np.inta` corrected to `np.int8`.
    The decoder mask always marks the first (start) token as attended.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__UpperCAmelCase : Dict = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : int = False
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = FlaxPegasusModelTester(self )
lowerCamelCase_ : Dict = ConfigTester(self , config_class=a_ )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(a_ , a_ , a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(a_ , a_ , a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : str = self._prepare_for_class(a_ , a_ )
lowerCamelCase_ : List[Any] = model_class(a_ )
@jax.jit
def encode_jitted(a_ , a_=None , **a_ ):
return model.encode(input_ids=a_ , attention_mask=a_ )
with self.subTest("JIT Enabled" ):
lowerCamelCase_ : List[Any] = encode_jitted(**a_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCamelCase_ : int = encode_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) )
for jitted_output, output in zip(a_ , a_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Optional[int] = model_class(a_ )
lowerCamelCase_ : List[str] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
lowerCamelCase_ : List[str] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(a_ , a_ , a_ ):
return model.decode(
decoder_input_ids=a_ , decoder_attention_mask=a_ , encoder_outputs=a_ , )
with self.subTest("JIT Enabled" ):
lowerCamelCase_ : int = decode_jitted(**a_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCamelCase_ : Optional[Any] = decode_jitted(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) )
for jitted_output, output in zip(a_ , a_ ):
self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def _UpperCamelCase ( self ):
        # Smoke test: load the published pegasus-large checkpoint (converted from
        # PyTorch) for each model class and run a minimal (1, 1) input through it.
        for model_class_name in self.all_model_classes:
            lowerCamelCase_ : Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=a_ )
            lowerCamelCase_ : Any = np.ones((1, 1) )
            lowerCamelCase_ : Any = model(a_ )
            # Only checks that a forward pass returns something, not its values.
            self.assertIsNotNone(a_ )
    @slow
    def _UpperCamelCase ( self ):
        # End-to-end integration test: summarize two news articles with the
        # published pegasus-xsum checkpoint and compare against reference
        # summaries. Requires network access and real model weights.
        lowerCamelCase_ : Tuple = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        lowerCamelCase_ : Dict = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        lowerCamelCase_ : str = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]
        lowerCamelCase_ : List[str] = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]
        # Tokenize, generate with beam search, and decode back to text.
        lowerCamelCase_ : int = tokenizer(a_ , return_tensors="np" , truncation=a_ , max_length=512 , padding=a_ )
        lowerCamelCase_ : Dict = model.generate(**a_ , num_beams=2 ).sequences
        lowerCamelCase_ : List[Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
        assert tgt_text == decoded
| 704 |
import re
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
raise ValueError("Invalid Strand")
return dna.translate(dna.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 73 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# Map of canonical BLIP checkpoint names to the URLs of their hosted config.json.
__magic_name__ = {
    '''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
    '''Salesforce/blip-vqa-capfit-large''': (
        '''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
    ),
    '''Salesforce/blip-image-captioning-base''': (
        '''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
    ),
    '''Salesforce/blip-image-captioning-large''': (
        '''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
    ),
    '''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
    '''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
    '''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
    '''Salesforce/blip-itm-large-flikr''': (
        '''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
    ),
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the BLIP text tower (vocab/layout hyperparameters).

    NOTE(review): the duplicated ``a_`` parameter names below look mangled by
    obfuscation — the upstream class gives each hyperparameter its own name.
    """
    __UpperCAmelCase : List[str] = '''blip_text_model'''
    def __init__( self , a_=3_0524 , a_=768 , a_=768 , a_=3072 , a_=768 , a_=12 , a_=8 , a_=512 , a_="gelu" , a_=1E-12 , a_=0.0 , a_=0.0 , a_=0.02 , a_=3_0522 , a_=2 , a_=0 , a_=102 , a_=True , a_=True , **a_ , ):
        # Forward special-token ids and remaining kwargs to PretrainedConfig.
        super().__init__(
            pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , sep_token_id=a_ , **a_ , )
        # Core text-model hyperparameters.
        lowerCamelCase_ : Dict = vocab_size
        lowerCamelCase_ : List[Any] = hidden_size
        lowerCamelCase_ : Optional[int] = encoder_hidden_size
        lowerCamelCase_ : List[Any] = intermediate_size
        lowerCamelCase_ : Optional[Any] = projection_dim
        lowerCamelCase_ : List[Any] = hidden_dropout_prob
        lowerCamelCase_ : Tuple = num_hidden_layers
        lowerCamelCase_ : int = num_attention_heads
        lowerCamelCase_ : str = max_position_embeddings
        lowerCamelCase_ : Optional[Any] = layer_norm_eps
        lowerCamelCase_ : Union[str, Any] = hidden_act
        lowerCamelCase_ : Tuple = initializer_range
        lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase_ : int = is_decoder
        lowerCamelCase_ : Union[str, Any] = use_cache
    @classmethod
    def _UpperCamelCase ( cls , a_ , **a_ ):
        # Build a text config from a pretrained checkpoint name or path.
        cls._set_token_in_kwargs(a_ )
        # NOTE(review): ``get_config_dict`` conventionally returns a
        # ``(config_dict, kwargs)`` pair; the single assignment below looks like
        # a lost tuple unpack — confirm against the upstream source.
        lowerCamelCase_ : Optional[int] = cls.get_config_dict(a_ , **a_ )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            lowerCamelCase_ : str = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(a_ , **a_ )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the BLIP vision tower (ViT-style hyperparameters).

    NOTE(review): the duplicated ``a_`` parameter names below look mangled by
    obfuscation — the upstream class gives each hyperparameter its own name.
    """
    __UpperCAmelCase : Any = '''blip_vision_model'''
    def __init__( self , a_=768 , a_=3072 , a_=512 , a_=12 , a_=12 , a_=384 , a_=16 , a_="gelu" , a_=1E-5 , a_=0.0 , a_=1E-10 , **a_ , ):
        super().__init__(**a_ )
        # Core vision-model hyperparameters.
        lowerCamelCase_ : List[Any] = hidden_size
        lowerCamelCase_ : int = intermediate_size
        lowerCamelCase_ : str = projection_dim
        lowerCamelCase_ : List[str] = num_hidden_layers
        lowerCamelCase_ : Dict = num_attention_heads
        lowerCamelCase_ : Dict = patch_size
        lowerCamelCase_ : Optional[int] = image_size
        lowerCamelCase_ : Dict = initializer_range
        lowerCamelCase_ : int = attention_dropout
        lowerCamelCase_ : Tuple = layer_norm_eps
        lowerCamelCase_ : Optional[Any] = hidden_act
    @classmethod
    def _UpperCamelCase ( cls , a_ , **a_ ):
        # Build a vision config from a pretrained checkpoint name or path.
        cls._set_token_in_kwargs(a_ )
        # NOTE(review): see the text-config classmethod — the assignment below
        # looks like a lost ``(config_dict, kwargs)`` tuple unpack.
        lowerCamelCase_ : List[Any] = cls.get_config_dict(a_ , **a_ )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            lowerCamelCase_ : List[Any] = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(a_ , **a_ )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Composite BLIP configuration that nests a text config and a vision config.

    NOTE(review): the duplicated ``a_`` parameters and the free names
    ``text_config`` / ``vision_config`` referenced below look mangled by
    obfuscation — confirm the real parameter names against the upstream source.
    """
    __UpperCAmelCase : Optional[int] = '''blip'''
    __UpperCAmelCase : Optional[int] = True
    def __init__( self , a_=None , a_=None , a_=512 , a_=2.65_92 , a_=256 , **a_ , ):
        super().__init__(**a_ )
        # Fall back to empty dicts so the sub-configs use their own defaults.
        if text_config is None:
            lowerCamelCase_ : Tuple = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
        if vision_config is None:
            lowerCamelCase_ : List[str] = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
        # Instantiate the nested sub-configurations.
        lowerCamelCase_ : Dict = BlipTextConfig(**a_ )
        lowerCamelCase_ : str = BlipVisionConfig(**a_ )
        lowerCamelCase_ : int = self.vision_config.hidden_size
        lowerCamelCase_ : List[str] = projection_dim
        lowerCamelCase_ : Any = logit_scale_init_value
        # Fixed initializer settings for the composite model.
        lowerCamelCase_ : int = 1.0
        lowerCamelCase_ : List[Any] = 0.02
        lowerCamelCase_ : int = image_text_hidden_size
    @classmethod
    def _UpperCamelCase ( cls , a_ , a_ , **a_ ):
        # Build a composite config from already-constructed sub-configs.
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a_ )
    def _UpperCamelCase ( self ):
        # Serialize to a plain dict, expanding the nested sub-configs.
        lowerCamelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
        lowerCamelCase_ : Tuple = self.text_config.to_dict()
        lowerCamelCase_ : Tuple = self.vision_config.to_dict()
        lowerCamelCase_ : int = self.__class__.model_type
        return output
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False):
    '''Convert a polar force (magnitude, angle) to Cartesian [x, y] components.

    The third flag selects radians; otherwise the angle is taken in degrees.
    NOTE(review): the three identical parameter names look mangled by
    obfuscation — upstream they are (magnitude, angle, radian_mode).
    '''
    if radian_mode:
        return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)]
    return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1):
    '''Return True if the net moment of the given forces about their application
    points is (approximately) zero, i.e. the system is in static equilibrium.

    NOTE(review): the three identical parameter names look mangled by
    obfuscation — upstream they are (forces, location, eps).
    '''
    # Moment of each force = location x force (2-D cross product gives scalars).
    lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_)
    lowerCamelCase_ : float = sum(lowerCAmelCase_)
    return abs(lowerCAmelCase_) < eps
if __name__ == "__main__":
    # Self-checks: three textbook force systems that must each balance.
    # NOTE(review): `polar_force` / `in_static_equilibrium` are not bound under
    # these names in this module (the defs above are obfuscated) — confirm.
    # Test to check if it works
    __magic_name__ = array(
        [
            polar_force(7_18.4, 1_8_0 - 3_0),
            polar_force(8_79.54, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    __magic_name__ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __magic_name__ = array(
        [
            polar_force(3_0 * 9.81, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    __magic_name__ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 73 | 0 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''Session-scoped fixture: a 10-row ``datasets.Dataset`` with tokens,
    labels, answers and id columns, shared by the test session.

    NOTE(review): free names ``n`` / ``dataset`` below look mangled by
    obfuscation (the local assignments were renamed) — confirm upstream.
    '''
    lowerCamelCase_ : Tuple = 10
    lowerCamelCase_ : Union[str, Any] = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }),
            "id": datasets.Value("int64"),
        })
    lowerCamelCase_ : List[str] = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(lowerCAmelCase_)),
        } , features=lowerCAmelCase_ , )
    return dataset
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: materialize the shared dataset to an Arrow cache file and
    return its path. NOTE(review): ``tmp_path_factory`` / ``dataset`` /
    ``filename`` are free names here — the parameters were renamed by
    obfuscation; confirm against the upstream conftest.
    '''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=lowerCAmelCase_)
    return filename
# FILE_CONTENT + files
# Two-line text payload written into the plain/compressed file fixtures below.
__magic_name__ = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: plain-text file containing FILE_CONTENT; returns its path.'''
    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "file.txt"
    lowerCamelCase_ : List[str] = FILE_CONTENT
    with open(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_)
    return filename
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: bzip2-compressed copy of FILE_CONTENT; returns its path.
    NOTE(review): module name ``bza`` looks mangled (should be ``bz2``).'''
    import bza
    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    lowerCamelCase_ : Dict = bytes(lowerCAmelCase_ , "utf-8")
    with bza.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: gzip-compressed copy of FILE_CONTENT; returns its path.'''
    import gzip
    lowerCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    lowerCamelCase_ : int = bytes(lowerCAmelCase_ , "utf-8")
    with gzip.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: lz4-compressed copy of FILE_CONTENT (only when lz4 support is
    available); returns its path. NOTE(review): ``lza`` looks mangled
    (should be ``lz4``).'''
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame
        lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        lowerCamelCase_ : int = bytes(lowerCAmelCase_ , "utf-8")
        with lza.frame.open(lowerCAmelCase_ , "wb") as f:
            f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: 7z archive containing the text file (only when py7zr support is
    available); returns its path. NOTE(review): ``pyazr`` looks mangled
    (should be ``py7zr``).'''
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr
        lowerCamelCase_ : List[Any] = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with pyazr.SevenZipFile(lowerCAmelCase_ , "w") as archive:
            archive.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: tar archive containing the text file; returns its path.'''
    import tarfile
    lowerCamelCase_ : int = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(lowerCAmelCase_ , "w") as f:
        f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: xz/lzma-compressed copy of FILE_CONTENT; returns its path.'''
    import lzma
    lowerCamelCase_ : Any = tmp_path_factory.mktemp("data") / "file.txt.xz"
    lowerCamelCase_ : int = bytes(lowerCAmelCase_ , "utf-8")
    with lzma.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive containing the text file; returns its path.'''
    import zipfile
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: zstandard-compressed copy of FILE_CONTENT (only when zstd
    support is available); returns its path.'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "file.txt.zst"
        lowerCamelCase_ : Optional[int] = bytes(lowerCAmelCase_ , "utf-8")
        with zstd.open(lowerCAmelCase_ , "wb") as f:
            f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: small bilingual TMX/XML document; returns its path.'''
    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "file.xml"
    lowerCamelCase_ : Any = textwrap.dedent(
        "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>")
    with open(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_)
    return filename
# Shared row/column payloads reused by the CSV/JSON/Parquet/SQLite fixtures.
__magic_name__ = [
    {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
# Second split of rows (values 4-5).
__magic_name__ = [
    {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
    {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
# Same data in column-major (dict-of-lists) form.
__magic_name__ = {
    '''col_1''': ['''0''', '''1''', '''2''', '''3'''],
    '''col_2''': [0, 1, 2, 3],
    '''col_3''': [0.0, 1.0, 2.0, 3.0],
}
# Rows with columns in a shuffled key order.
__magic_name__ = [
    {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
    {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
# Rows whose col_1 values are non-numeric strings.
__magic_name__ = [
    {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''Fixture: the shared column-major test data.'''
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: Arrow cache file built from the shared data; returns its path.'''
    lowerCamelCase_ : Optional[Any] = datasets.Dataset.from_dict(lowerCAmelCase_)
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: SQLite database with one ``dataset`` table holding the shared
    rows; returns its path. NOTE(review): ``sqlitea`` looks mangled (should be
    ``sqlite3``).'''
    lowerCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlitea.connect(lowerCAmelCase_)) as con:
        lowerCamelCase_ : str = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: CSV file with the shared rows; returns its path.'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(lowerCAmelCase_ , "w" , newline="") as f:
        lowerCamelCase_ : str = csv.DictWriter(lowerCAmelCase_ , fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: second CSV file with the same shared rows; returns its path.'''
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(lowerCAmelCase_ , "w" , newline="") as f:
        lowerCamelCase_ : int = csv.DictWriter(lowerCAmelCase_ , fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: bzip2-compressed copy of the CSV fixture; returns its path.
    NOTE(review): ``bza`` looks mangled (should be ``bz2``).'''
    import bza
    lowerCamelCase_ : Tuple = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(lowerCAmelCase_ , "rb") as f:
        lowerCamelCase_ : Tuple = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(lowerCAmelCase_ , "wb") as f:
        f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive containing both CSV fixtures; returns its path.'''
    lowerCamelCase_ : Union[str, Any] = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive with the CSVs renamed to an uppercase ``.CSV``
    extension (tests case-insensitive extension handling); returns its path.'''
    lowerCamelCase_ : int = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV")))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV")))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive nesting both CSVs under ``main_dir/``; path.'''
    lowerCamelCase_ : Any = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: Parquet file holding the shared rows with an explicit schema
    (string, int, float); returns its path. NOTE(review): ``pa.intaa`` /
    ``pa.floataa`` look mangled (should be ``int64`` / ``float64``).'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    lowerCamelCase_ : Optional[Any] = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.intaa(),
            "col_3": pa.floataa(),
        })
    with open(lowerCAmelCase_ , "wb") as f:
        lowerCamelCase_ : Any = pq.ParquetWriter(lowerCAmelCase_ , schema=lowerCAmelCase_)
        lowerCamelCase_ : Dict = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCAmelCase_))] for k in DATA[0]} , schema=lowerCAmelCase_)
        writer.write_table(lowerCAmelCase_)
        writer.close()
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: JSON file ``{"data": DATA}`` (row-major); returns its path.'''
    lowerCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data") / "dataset.json")
    lowerCamelCase_ : Optional[Any] = {"data": DATA}
    with open(lowerCAmelCase_ , "w") as f:
        json.dump(lowerCAmelCase_ , lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: JSON file ``{"data": DATA_DICT_OF_LISTS}`` (column-major);
    returns its path.'''
    lowerCamelCase_ : List[str] = str(tmp_path_factory.mktemp("data") / "dataset.json")
    lowerCamelCase_ : List[Any] = {"data": DATA_DICT_OF_LISTS}
    with open(lowerCAmelCase_ , "w") as f:
        json.dump(lowerCAmelCase_ , lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: JSON-Lines file with one shared row per line; returns its path.'''
    lowerCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: second JSON-Lines file with the same shared rows; path.'''
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: JSON-Lines file of the shuffled-key rows (DATA_312); path.'''
    lowerCamelCase_ : Any = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA_312:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: JSON-Lines file of the string-valued rows (DATA_STR); path.'''
    lowerCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(lowerCAmelCase_ , "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(lowerCAmelCase_) + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: gzip-compressed copy of the text-dataset fixture; path.'''
    import gzip
    lowerCamelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(lowerCAmelCase_ , "rb") as orig_file:
        with gzip.open(lowerCAmelCase_ , "wb") as zipped_file:
            zipped_file.writelines(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: gzip-compressed copy of the JSON-Lines fixture; path.'''
    import gzip
    lowerCamelCase_ : List[str] = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(lowerCAmelCase_ , "rb") as orig_file:
        with gzip.open(lowerCAmelCase_ , "wb") as zipped_file:
            zipped_file.writelines(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive containing both JSON-Lines fixtures; path.'''
    lowerCamelCase_ : int = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive with a JSON-Lines file under ``nested/``; path.'''
    lowerCamelCase_ : Optional[Any] = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("nested" , os.path.basename(lowerCAmelCase_)))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive nesting both JSON-Lines files under ``main_dir/``;
    returns its path.'''
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: tar archive containing both JSON-Lines fixtures; path.'''
    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(lowerCAmelCase_ , "w") as f:
        f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.add(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: tar archive with a JSON-Lines file under ``nested/``; path.'''
    lowerCamelCase_ : List[str] = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(lowerCAmelCase_ , "w") as f:
        f.add(lowerCAmelCase_ , arcname=os.path.join("nested" , os.path.basename(lowerCAmelCase_)))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: text file with one digit per line ("0".."3"); returns its path.'''
    lowerCamelCase_ : Tuple = ["0", "1", "2", "3"]
    lowerCamelCase_ : Dict = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(lowerCAmelCase_ , "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: second one-digit-per-line text file; returns its path.'''
    lowerCamelCase_ : str = ["0", "1", "2", "3"]
    lowerCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(lowerCAmelCase_ , "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: same per-line data but with an unrecognized ``.abc`` extension
    (for unsupported-extension handling); returns its path.'''
    lowerCamelCase_ : Union[str, Any] = ["0", "1", "2", "3"]
    lowerCamelCase_ : str = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(lowerCAmelCase_ , "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive containing both text-dataset fixtures; path.'''
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive nesting both text files under ``main_dir/``; path.'''
    lowerCamelCase_ : Optional[int] = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
        f.write(lowerCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(lowerCAmelCase_)))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive whose members carry an unsupported ``.ext``
    extension; returns its path.'''
    lowerCamelCase_ : Optional[Any] = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename("unsupported.ext"))
        f.write(lowerCAmelCase_ , arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: UTF-8 text file containing a U+2029 paragraph separator (tests
    Unicode newline handling); returns its path.'''
    lowerCamelCase_ : List[Any] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    lowerCamelCase_ : str = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(lowerCAmelCase_ , "w" , encoding="utf-8") as f:
        f.write(lowerCAmelCase_)
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''Fixture: repository-relative path of the bundled RGB test JPEG.'''
    image_path_parts = ("tests" , "features" , "data" , "test_image_rgb.jpg")
    return os.path.join(*image_path_parts)
@pytest.fixture(scope="session")
def __magic_name__ ( ):
    '''Fixture: repository-relative path of the bundled 44.1 kHz test WAV.'''
    audio_path_parts = ("tests" , "features" , "data" , "test_audio_44100.wav")
    return os.path.join(*audio_path_parts)
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
    '''Fixture: zip archive holding the test image twice (second copy renamed
    ``*2.jpg``); returns its path.'''
    lowerCamelCase_ : List[Any] = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(lowerCAmelCase_ , "w") as f:
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_))
        f.write(lowerCAmelCase_ , arcname=os.path.basename(lowerCAmelCase_).replace(".jpg" , "2.jpg"))
    return path
@pytest.fixture(scope="session")
def __magic_name__ ( lowerCAmelCase_):
    '''Fixture: directory tree with visible and hidden subdirs/files, used to
    test hidden-file filtering during data discovery; returns the root dir.'''
    lowerCamelCase_ : Dict = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt" , "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt" , "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt" , "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt" , "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt" , "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor that pairs a CLAP feature extractor with a Roberta tokenizer,
    exposing a single ``__call__`` for text and/or audio inputs.
    """
    __UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
    __UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , a_ , a_ ):
        # Delegate storage of the feature extractor and tokenizer to the mixin.
        super().__init__(a_ , a_ )
    def __call__( self , a_=None , a_=None , a_=None , **a_ ):
        # Tokenize text and/or extract audio features; at least one of the two
        # inputs must be provided.
        # NOTE(review): ``kwargs`` is a free name here (the catch-all parameter
        # was renamed by obfuscation) — confirm against the upstream source.
        lowerCamelCase_ : Any = kwargs.pop("sampling_rate" , a_ )
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )
        if text is not None:
            lowerCamelCase_ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ )
        if audios is not None:
            lowerCamelCase_ : List[str] = self.feature_extractor(
                a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
        if text is not None and audios is not None:
            # NOTE(review): upstream this attaches the audio features onto the
            # text encoding (``encoding["input_features"] = ...``); the plain
            # assignment below looks like a lost subscript — confirm.
            lowerCamelCase_ : List[str] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*a_ , **a_ )
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*a_ , **a_ )
    @property
    def _UpperCamelCase ( self ):
        # Union of tokenizer and feature-extractor input names, de-duplicated
        # while preserving order.
        lowerCamelCase_ : int = self.tokenizer.model_input_names
        lowerCamelCase_ : Dict = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 73 | 0 |
'''simple docstring'''
import os
def __magic_name__ ( ):
    '''Project Euler 13: return the first ten digits of the sum of the numbers
    listed one per line in ``num.txt`` next to this script.

    Returns:
        str: the ten leading digits of the total.
    '''
    # Bug fix: the original referenced an undefined name where ``__file__``
    # (to locate the data file) and ``line`` (the loop variable) were intended,
    # so the function raised NameError on every call.
    lowerCamelCase_ : str = os.path.join(os.path.dirname(__file__) , "num.txt")
    with open(lowerCamelCase_) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module (the function
    # above is bound to a different name) — confirm the intended entry point.
    print(solution())
| 707 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def is_pangram_faster(input_str="The quick brown fox jumps over the lazy dog"):
    """Pangram check using a fixed 26-entry flag table.

    Assumes ASCII letters only — a non-ASCII cased character would index
    outside the table. Renamed from the `__magic_name__` placeholder to the
    name the benchmark in this file imports. The original reassigned a
    placeholder local instead of setting `flag[...]`, so it never recorded
    which letters were seen.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str="The quick brown fox jumps over the lazy dog"):
    """One-expression pangram check via a set comprehension.

    Renamed from the `__magic_name__` placeholder to the name the benchmark
    in this file imports; the body referenced `input_str`, which the
    original signature never defined.
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
    """Time the three pangram implementations with timeit and print the results."""
    from timeit import timeit
    # NOTE(review): the setup string is bound to `lowerCamelCase_`, but the
    # timeit calls below pass `lowerCAmelCase_`, which is undefined here.
    # The imported names `is_pangram*` must also exist in __main__ for the
    # setup to succeed — confirm the intended bindings.
    lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()" , setup=lowerCAmelCase_))
    print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
    print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
    # Reference timings from two sample runs (seconds):
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `benchmark` is not defined under that name in this module
    # as written (the function above is called `__magic_name__`).
    benchmark()
| 73 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = '''pytorch_model.bin'''
@dataclasses.dataclass
class lowerCAmelCase__ :
    """Model-related arguments for the self-training loop: the pretrained
    model identifier and the download cache directory.

    NOTE(review): both fields share the placeholder name `__UpperCAmelCase`
    (the second shadows the first), and the default `__lowerCamelCase` is
    not defined in this module as written — confirm the intended names and
    sentinel (likely `None`).
    """
    __UpperCAmelCase : str = dataclasses.field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
    __UpperCAmelCase : Optional[str] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''}, )
@dataclasses.dataclass
class lowerCAmelCase__ :
    """Data-related arguments: training/inference/validation files, task
    name and label list.

    NOTE(review): all fields share the placeholder name `__UpperCAmelCase`
    (each shadows the previous), and the default `__lowerCamelCase` is not
    defined in this module as written — confirm the intended names and
    sentinel (likely `None`).
    """
    __UpperCAmelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
    __UpperCAmelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
    __UpperCAmelCase : Optional[str] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
    __UpperCAmelCase : Optional[str] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''The name of the task to train on.'''}, )
    __UpperCAmelCase : Optional[List[str]] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class lowerCAmelCase__ :
    """Training-related arguments: output directory, evaluation metric and
    strategy, early-stopping knobs, pseudo-label filtering switches, and
    the random seed.

    NOTE(review): all fields share the placeholder name `__UpperCAmelCase`
    (each shadows the previous), and the default `__lowerCamelCase` is not
    defined in this module as written — confirm the intended names and
    sentinel values.
    """
    __UpperCAmelCase : str = dataclasses.field(
        metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
    __UpperCAmelCase : Optional[str] = dataclasses.field(
        default='''accuracy''', metadata={'''help''': '''The evaluation metric used for the task.'''} )
    __UpperCAmelCase : Optional[str] = dataclasses.field(
        default='''no''', metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
        }, )
    __UpperCAmelCase : Optional[int] = dataclasses.field(
        default=10, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''}, )
    __UpperCAmelCase : Optional[float] = dataclasses.field(
        default=0.0, metadata={
            '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
        }, )
    __UpperCAmelCase : Optional[bool] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''}, )
    __UpperCAmelCase : Optional[bool] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''}, )
    __UpperCAmelCase : Optional[bool] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''}, )
    __UpperCAmelCase : Optional[float] = dataclasses.field(
        default=0.0, metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''}, )
    __UpperCAmelCase : Optional[int] = dataclasses.field(
        default=100, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''}, )
    __UpperCAmelCase : Optional[int] = dataclasses.field(
        default=__lowerCamelCase, metadata={'''help''': '''Random seed for initialization.'''}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, idalabel, next_data_dir):
    """Build the pseudo-labeled training set for the next self-training round.

    Joins model predictions (`infer_output`) column-wise onto the unlabeled
    examples (`infer_input`), optionally filters by confidence and/or by
    validation performance, relabels `prediction` -> `label` (mapping ids to
    label strings via `idalabel`), shuffles, and writes the result to
    `<next_data_dir>/train_pseudo.<ext>`.

    The original declared six identically named parameters (a SyntaxError);
    names were recovered from how the body uses them, and the function was
    renamed from the `__magic_name__` placeholder to the name the driver in
    this file calls.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep the most confident rows, proportional to the validation score.
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"""train_pseudo.{args.data_file_extension}""")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_):
    """Run the iterative self-training loop: fine-tune, predict on unlabeled
    data, pseudo-label, and repeat up to `max_selftrain_iterations` rounds
    with optional early stopping on the evaluation metric.

    NOTE(review): all four positional parameters and the **kwargs share one
    placeholder name (`lowerCAmelCase_`), which is a SyntaxError as written;
    judging by the constructor calls below they were
    (model_name_or_path, train_file, infer_file, output_dir) — confirm
    against the original script. The same placeholder/undefined-name pattern
    appears throughout the body (e.g. `args`, `data_files`, `extension`,
    `arguments_dict` are read but only placeholder locals are assigned).
    """
    lowerCamelCase_ : Union[str, Any] = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # Build default argument dataclasses, then flatten them (plus any kwargs
    # overrides) into one argparse.Namespace.
    lowerCamelCase_ : Any = STModelArguments(model_name_or_path=lowerCAmelCase_)
    lowerCamelCase_ : Tuple = STDataArguments(train_file=lowerCAmelCase_ , infer_file=lowerCAmelCase_)
    lowerCamelCase_ : Tuple = STTrainingArguments(output_dir=lowerCAmelCase_)
    lowerCamelCase_ : List[str] = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(lowerCAmelCase_).items():
            setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
    for key, value in kwargs.items():
        if hasattr(lowerCAmelCase_ , lowerCAmelCase_):
            setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
    # Sanity checks
    lowerCamelCase_ : Union[str, Any] = {}
    lowerCamelCase_ : Union[str, Any] = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    lowerCamelCase_ : Optional[Any] = args.train_file
    lowerCamelCase_ : Optional[Any] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        lowerCamelCase_ : Union[str, Any] = args.eval_file
    # All data files must share one extension, csv or json.
    for key in data_files:
        lowerCamelCase_ : int = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
        if args.data_file_extension is None:
            lowerCamelCase_ : Optional[int] = extension
        else:
            assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
    assert (
        args.eval_metric in datasets.list_metrics()
    ), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info("Creating the initial data directory for self-training...")
    # `data_dir_format(i)` yields the working directory for round i.
    lowerCamelCase_ : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
    lowerCamelCase_ : str = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_)
        os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_)
    accelerator.wait_for_everyone()
    # Early-stopping bookkeeping across rounds.
    lowerCamelCase_ : int = None
    lowerCamelCase_ : str = None
    lowerCamelCase_ : str = 0
    lowerCamelCase_ : int = False
    # Show the progress bar
    lowerCamelCase_ : Optional[int] = tqdm(range(args.max_selftrain_iterations) , disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations)):
        lowerCamelCase_ : Dict = data_dir_format(lowerCAmelCase_)
        assert os.path.exists(lowerCAmelCase_)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        lowerCamelCase_ : Any = os.path.join(lowerCAmelCase_ , "stage-1")
        lowerCamelCase_ : Union[str, Any] = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(lowerCAmelCase_ , lowerCAmelCase_):
                arguments_dict.update({key: value})
        lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , "best-checkpoint" , lowerCAmelCase_)
        if os.path.exists(lowerCAmelCase_):
            # A finished checkpoint lets this stage be resumed/skipped.
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , lowerCAmelCase_ , lowerCAmelCase_ , )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , lowerCAmelCase_)
            finetune(**lowerCAmelCase_)
            accelerator.wait_for_everyone()
            assert os.path.exists(lowerCAmelCase_)
            logger.info("Self-training job completed: iteration: %d, stage: 1." , lowerCAmelCase_)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            lowerCamelCase_ : str = os.path.join(lowerCAmelCase_ , "best-checkpoint")
            lowerCamelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , "stage-2")
            # Update arguments_dict
            lowerCamelCase_ : Tuple = model_path
            lowerCamelCase_ : Any = data_files["train"]
            lowerCamelCase_ : Optional[Any] = current_output_dir
            lowerCamelCase_ : List[str] = os.path.join(lowerCAmelCase_ , "best-checkpoint" , lowerCAmelCase_)
            if os.path.exists(lowerCAmelCase_):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , lowerCAmelCase_ , lowerCAmelCase_ , )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , lowerCAmelCase_)
                finetune(**lowerCAmelCase_)
                accelerator.wait_for_everyone()
                assert os.path.exists(lowerCAmelCase_)
                logger.info("Self-training job completed: iteration: %d, stage: 2." , lowerCAmelCase_)
        # Gather this round's eval result and predictions, then pseudo-label
        # the unlabeled data for the next round.
        lowerCamelCase_ : int = iteration
        lowerCamelCase_ : Any = data_dir_format(iteration + 1)
        lowerCamelCase_ : Any = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "best-checkpoint"))
        lowerCamelCase_ : Tuple = config.idalabel
        lowerCamelCase_ : Dict = os.path.join(lowerCAmelCase_ , "eval_results_best-checkpoint.json")
        lowerCamelCase_ : int = os.path.join(lowerCAmelCase_ , "test_results_best-checkpoint.json")
        assert os.path.exists(lowerCAmelCase_)
        with open(lowerCAmelCase_ , "r") as f:
            lowerCamelCase_ : List[str] = float(json.load(lowerCAmelCase_)[args.eval_metric])
        lowerCamelCase_ : Union[str, Any] = os.path.join(lowerCAmelCase_ , "infer_output_best-checkpoint.csv")
        assert os.path.exists(lowerCAmelCase_)
        # Loading the dataset from local csv or json files.
        lowerCamelCase_ : Tuple = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]})["data"]
        lowerCamelCase_ : Any = load_dataset("csv" , data_files={"data": infer_output_file})["data"]
        if accelerator.is_main_process:
            os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_)
            shutil.copy(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , F"""eval_results_iter-{iteration}.json"""))
            if os.path.exists(lowerCAmelCase_):
                shutil.copy(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , F"""test_results_iter-{iteration}.json"""))
            create_pseudo_labeled_data(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        accelerator.wait_for_everyone()
        lowerCamelCase_ : Dict = os.path.join(lowerCAmelCase_ , F"""train_pseudo.{args.data_file_extension}""")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            # Track the best round; reset patience on a real improvement,
            # otherwise count toward early stopping.
            lowerCamelCase_ : Union[str, Any] = eval_result
            if best_iteration is None:
                lowerCamelCase_ : Dict = new_iteration
                lowerCamelCase_ : Union[str, Any] = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    lowerCamelCase_ : Optional[int] = new_iteration
                    lowerCamelCase_ : Dict = new_eval_result
                    lowerCamelCase_ : Any = 0
                else:
                    if new_eval_result == best_eval_result:
                        lowerCamelCase_ : Optional[int] = new_iteration
                        lowerCamelCase_ : int = new_eval_result
                    early_stopping_patience_counter += 1
            if early_stopping_patience_counter >= args.early_stopping_patience:
                lowerCamelCase_ : Optional[Any] = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d" , lowerCAmelCase_)
        logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase_)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(lowerCAmelCase_ , F"""eval_results_iter-{iteration}.json""") , os.path.join(lowerCAmelCase_ , "eval_results_best-iteration.json") , )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase_)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(lowerCAmelCase_ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""") , os.path.join(lowerCAmelCase_ , "eval_results_best-iteration.json") , )
| 708 |
# Conversion factors from each supported energy unit to joules (the pivot
# unit used by the conversion function below).
__magic_name__ = {
    "joule": 1.0,
    "kilojoule": 1_0_0_0,
    "megajoule": 1_0_0_0_0_0_0,
    "gigajoule": 1_0_0_0_0_0_0_0_0_0,
    "wattsecond": 1.0,
    "watthour": 3_6_0_0,
    "kilowatthour": 3_6_0_0_0_0_0,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_1_8_6.8,
    "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
    "electronvolt": 1.602_176_634E-19,
    "britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
    "footpound": 1.35_58_18,
}
def __magic_name__(from_type, to_type, value):
    """Convert `value` from `from_type` to `to_type`, pivoting through joules.

    Raises:
        ValueError: if either unit is not a key of ENERGY_CONVERSION.

    The original declared three identically named parameters (a SyntaxError)
    and joined an undefined name into the error message; names were
    recovered from the body.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
from __future__ import annotations
def comp_and_swap(array, index1, index2, direction):
    """Swap array[index1] and array[index2] in place when they are out of
    order for `direction` (1 = ascending, 0 = descending).

    Renamed from the `__magic_name__` placeholder: `bitonic_merge` and the
    __main__ block in this file call it as `comp_and_swap`. The original
    compared an index with itself and assigned the swap tuple to a local,
    so it never modified the array.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array, low, length, direction):
    """Merge the bitonic sequence of `length` elements starting at `low`
    into ascending (direction=1) or descending (direction=0) order, in place.

    Renamed from the `__magic_name__` placeholder: `bitonic_sort` and the
    __main__ block in this file call it as `bitonic_merge`. The original
    declared four identically named parameters (a SyntaxError) and passed
    the wrong bounds to the loop; names and bounds were recovered from the
    standard bitonic-merge structure the body follows.
    """
    if length > 1:
        middle = int(length / 2)
        # Compare-and-swap each element with its partner half a span away.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array, low, length, direction):
    """Bitonic sort of `length` elements starting at `low`, in place, into
    ascending (direction=1) or descending (direction=0) order.
    `length` must be a power of two.

    Renamed from the `__magic_name__` placeholder: the __main__ block in
    this file calls it as `bitonic_sort`. The original declared four
    identically named parameters (a SyntaxError); names were recovered
    from the standard bitonic-sort structure the body follows.
    """
    if length > 1:
        middle = int(length / 2)
        # Build an ascending half followed by a descending half (a bitonic
        # sequence), then merge the whole span in the requested direction.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and demonstrate
    # both sort directions. NOTE: bitonic sort requires the input length to
    # be a power of two.
    __magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
    __magic_name__ = [int(item.strip()) for item in user_input.split(''',''')]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('''\nSorted array in ascending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('''Sorted array in descending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-wide logger.
__magic_name__ = logging.get_logger(__name__)
# Name of the SentencePiece model file inside a saved tokenizer directory.
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
# Hosted vocabulary files for the pretrained checkpoints.
__magic_name__ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
# No fixed positional-embedding size limit for these checkpoints.
__magic_name__ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based tokenizer (XLNet layout: `A <sep> B <sep> <cls>`,
    left padding).

    NOTE(review): several `def` lines below declare multiple parameters with
    the same placeholder name (`a_`), which is a SyntaxError as written, and
    many bodies read names (`do_lower_case`, `pieces`, `outputs`, ...) that
    are only ever assigned to placeholder locals — the intended bindings are
    described per method but must be confirmed against the original source.
    """
    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Pad on the left, per the XLNet convention.
    __UpperCAmelCase : Optional[int] = '''left'''

    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        """Load the SentencePiece model and record the normalization flags
        (lower-casing, whitespace collapsing, accent stripping)."""
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        # Token-type id reserved for the trailing <cls> segment.
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )

    @property
    def _UpperCamelCase ( self ):
        """Vocabulary size, i.e. the number of SentencePiece pieces."""
        return len(self.sp_model )

    def _UpperCamelCase ( self ):
        """Return the full token -> id mapping, including added tokens."""
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """Drop the (unpicklable) SentencePiece processor when pickling."""
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state

    def __setstate__( self , a_ ):
        """Restore state and reload the SentencePiece model from disk."""
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCamelCase ( self , a_ ):
        """Normalize raw text: collapse whitespace, unify quotes, optionally
        strip accents (NFKD + drop combining marks) and lower-case."""
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs

    def _UpperCamelCase ( self , a_ ):
        """Tokenize text into SentencePiece pieces, splitting pieces that end
        in a digit followed by a comma so the comma becomes its own piece."""
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                # Re-encode the numeric prefix and re-attach the comma piece.
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces

    def _UpperCamelCase ( self , a_ ):
        """Convert a piece (token string) to its vocabulary id."""
        return self.sp_model.PieceToId(a_ )

    def _UpperCamelCase ( self , a_ ):
        """Convert a vocabulary id back to its piece (token string)."""
        return self.sp_model.IdToPiece(a_ )

    def _UpperCamelCase ( self , a_ ):
        """Join pieces into text, turning SentencePiece underlines into spaces."""
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string

    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        """Decode token ids back to a string, keeping added tokens as separate
        chunks and optionally cleaning up tokenization spaces."""
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Build model inputs with XLNet special tokens:
        `A <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Return token-type ids: segment 0 for A, segment 1 for B, and the
        reserved id 2 for the trailing <cls> token."""
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Save the SentencePiece model into `save_directory`, copying the
        original file when possible, else serializing from memory."""
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-wide logger.
__magic_name__ = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config.json.
__magic_name__ = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for CANINE-style character-level models: standard
    transformer hyperparameters plus the character-hashing/downsampling
    options.

    NOTE(review): the `__init__` below declares many parameters with the same
    placeholder name (`a_`) — a SyntaxError as written — and the body reads
    names (`max_position_embeddings`, `hidden_size`, ...) that the signature
    never defines; the intended parameter names are the attribute names
    assigned below, in order.
    """
    __UpperCAmelCase : Dict = '''canine'''

    def __init__( self , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1_6384 , a_=16 , a_=0.02 , a_=1E-12 , a_=0 , a_=0Xe000 , a_=0Xe001 , a_=4 , a_=4 , a_=8 , a_=1_6384 , a_=128 , **a_ , ):
        super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
        # Standard transformer hyperparameters.
        lowerCamelCase_ : Dict = max_position_embeddings
        lowerCamelCase_ : List[Any] = hidden_size
        lowerCamelCase_ : str = num_hidden_layers
        lowerCamelCase_ : Optional[int] = num_attention_heads
        lowerCamelCase_ : List[str] = intermediate_size
        lowerCamelCase_ : Tuple = hidden_act
        lowerCamelCase_ : Any = hidden_dropout_prob
        lowerCamelCase_ : Any = attention_probs_dropout_prob
        lowerCamelCase_ : List[Any] = initializer_range
        lowerCamelCase_ : Union[str, Any] = type_vocab_size
        lowerCamelCase_ : List[str] = layer_norm_eps
        # Character config:
        lowerCamelCase_ : List[Any] = downsampling_rate
        lowerCamelCase_ : Union[str, Any] = upsampling_kernel_size
        lowerCamelCase_ : Optional[Any] = num_hash_functions
        lowerCamelCase_ : str = num_hash_buckets
        lowerCamelCase_ : int = local_transformer_stride
| 710 |
def temp(min_val=10, max_val=1000, option=True):
    """Return `min_val` when `option` is truthy, else `max_val`.

    Raises:
        ValueError: when min_val > max_val.

    The original declared three identically named parameters (a SyntaxError)
    and ran isinstance checks of a value against itself; names were
    recovered from the body, and the function renamed from the
    `__magic_name__` placeholder (name `temp` — confirm against the
    original script).
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1, number_2):
    """Integer midpoint of two numbers (float average truncated by int()).

    Renamed from the `__magic_name__` placeholder: `guess_the_number` in
    this file calls it as `get_avg`. The original collapsed both parameters
    into one name and averaged a number with itself.
    """
    return int((number_1 + number_2) / 2)
def guess_the_number(lower, higher, to_guess):
    """Binary-search for `to_guess` strictly inside (lower, higher), printing
    every midpoint tried and the final guess.

    Raises:
        ValueError: when lower > higher, or when to_guess is not strictly
            between them.

    The original declared three identically named parameters (a SyntaxError)
    and bound its loop state to placeholder locals it never read; names were
    recovered from the body, and the function renamed from the
    `__magic_name__` placeholder to the name `main` in this file calls.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        """Compare a guess against the target: 'high', 'low', or 'same'."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")
def main():
    """Read the bounds and the target from stdin and run the guessing search.

    Renamed from the `__magic_name__` placeholder: the __main__ guard below
    calls `main`. The original bound the three inputs to placeholder locals
    and then passed undefined names to `guess_the_number`.
    """
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 73 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate

# Emit a deprecation warning as soon as this legacy module is imported.
deprecate(
    '''pipelines_utils''',
    '''0.22.0''',
    '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
    standard_warn=False,
    stacklevel=3,
)
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-wide logger.
__magic_name__ = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config.json.
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for CvT-style models: per-stage lists (three stages) of
    patch, embedding, attention and projection hyperparameters.

    NOTE(review): the `__init__` below declares every parameter with the
    same placeholder name (`a_`) — a SyntaxError as written — and the body
    reads names (`num_channels`, `patch_sizes`, ...) that the signature
    never defines; the intended parameter names are the attribute names
    assigned below, in order.
    """
    __UpperCAmelCase : List[str] = '''cvt'''

    def __init__( self , a_=3 , a_=[7, 3, 3] , a_=[4, 2, 2] , a_=[2, 1, 1] , a_=[64, 192, 384] , a_=[1, 3, 6] , a_=[1, 2, 10] , a_=[4.0, 4.0, 4.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.1] , a_=[True, True, True] , a_=[False, False, True] , a_=["dw_bn", "dw_bn", "dw_bn"] , a_=[3, 3, 3] , a_=[1, 1, 1] , a_=[2, 2, 2] , a_=[1, 1, 1] , a_=[1, 1, 1] , a_=0.02 , a_=1E-12 , **a_ , ):
        super().__init__(**a_ )
        # Patch embedding per stage.
        lowerCamelCase_ : Optional[Any] = num_channels
        lowerCamelCase_ : str = patch_sizes
        lowerCamelCase_ : List[Any] = patch_stride
        lowerCamelCase_ : str = patch_padding
        lowerCamelCase_ : str = embed_dim
        # Transformer blocks per stage.
        lowerCamelCase_ : Union[str, Any] = num_heads
        lowerCamelCase_ : Optional[Any] = depth
        lowerCamelCase_ : int = mlp_ratio
        lowerCamelCase_ : Union[str, Any] = attention_drop_rate
        lowerCamelCase_ : Optional[Any] = drop_rate
        lowerCamelCase_ : Optional[int] = drop_path_rate
        lowerCamelCase_ : Union[str, Any] = qkv_bias
        lowerCamelCase_ : int = cls_token
        # Convolutional QKV projection per stage.
        lowerCamelCase_ : int = qkv_projection_method
        lowerCamelCase_ : int = kernel_qkv
        lowerCamelCase_ : Optional[Any] = padding_kv
        lowerCamelCase_ : Optional[int] = stride_kv
        lowerCamelCase_ : Optional[int] = padding_q
        lowerCamelCase_ : List[Any] = stride_q
        lowerCamelCase_ : Any = initializer_range
        lowerCamelCase_ : int = layer_norm_eps
| 73 | 0 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list of integers `a` in place using pigeonhole sort.

    Runs in O(n + range) time where range = max(a) - min(a) + 1, so it is
    only sensible when the value range is comparable to the list length.
    Renamed from the `__magic_name__` placeholder: `main` in this file calls
    it as `pigeonhole_sort`. The original bound every value to a placeholder
    local and then read undefined names (`a`, `holes`, `size`, ...); an
    empty-input guard was also added, since min()/max() reject empty lists.
    """
    if not a:  # nothing to sort; min()/max() would raise on []
        return
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    """Demo: sort a sample list in place and print it.

    Renamed from the `__magic_name__` placeholder: the __main__ guard below
    calls `main`. The original joined the integers directly (a TypeError —
    str.join needs strings) and read an undefined name for the list.
    """
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
| 712 |
# Lazy-import scaffolding (Hugging Face style) for the LayoutXLM processor and
# tokenizers: heavy submodules are only imported when actually accessed.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)
# NOTE(review): this name is re-assigned several times below; in the canonical
# pattern each assignment targets a distinct binding (an `_import_structure`
# dict plus per-branch entry lists) — confirm the intended names.
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
    # The slow (sentencepiece-backed) tokenizer is optional.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizer''']
try:
    # The fast (tokenizers-backed) tokenizer is optional too.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded by the same
    # optional-dependency checks as the runtime path.
    from .processing_layoutxlm import LayoutXLMProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys
    # NOTE(review): `_import_structure` is referenced here but never defined in
    # this file, and canonically the result replaces the module via
    # `sys.modules[__name__] = _LazyModule(...)` — verify both points.
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Deprecated alias for the replacement image processor.

    Emits a FutureWarning on construction and defers entirely to the parent
    class (GLPNImageProcessor).
    """

    def __init__( self , *args , **kwargs ):
        # Original signature was `*a_ , **a_` — a duplicate parameter name,
        # which is a SyntaxError; restored to conventional *args/**kwargs.
        # FutureWarning is the conventional category for upcoming-removal
        # notices (the original passed the varargs tuple as the category).
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor wrapping an Encodec feature extractor and a T5 tokenizer
    (MusicGen-style): tokenizes text, extracts audio features, and strips
    padding from generated audio on decode.

    NOTE(review): this block appears mechanically renamed — distinct locals
    were collapsed into `lowerCamelCase_` and several `def` signatures repeat
    the parameter `a_`, so names used below (`inputs`, `audio_inputs`,
    `audio`, `text`, `args`, `kwargs`, `audio_values`, `padding_mask`,
    `seq_len`, `difference`, `sliced_audio`) are unbound at their use sites.
    Comments describe the apparent intent; verify against the canonical
    implementation before relying on behavior.
    """

    # Component classes this processor is composed of.
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__( self , a_ , a_ ):
        super().__init__(a_ , a_ )
        # Default "current" processor is the feature extractor.
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False

    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        # Delegates decoder prompt-id construction to the tokenizer.
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )

    def __call__( self , *a_ , **a_ ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )
        # Split the mixed kwargs into audio / sampling-rate / text groups.
        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio features (and padding mask, if present) into the
            # text encoding.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Route decoding: audio tensors go through `_decode_audio`, token ids
        # through the tokenizer's batch_decode.
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Thin wrapper over the tokenizer's decode.
        return self.tokenizer.decode(*a_ , **a_ )

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Strip padding from generated audio using the padding mask.
        lowerCamelCase_ : Any = to_numpy(a_ )
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
        if padding_mask is None:
            return list(a_ )
        lowerCamelCase_ : Tuple = to_numpy(a_ )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            # keep only the samples the mask marks as non-padding
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
        return audio_values
| 73 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Video-classification pipeline: samples evenly spaced frames from a
    video via decord, runs them through the model, and returns top-k labels.

    NOTE(review): all four stage methods below are bound to the same name
    `_UpperCamelCase`, so earlier definitions are shadowed by later ones
    (canonically `_sanitize_parameters` / `preprocess` / `_forward` /
    `postprocess`), several signatures repeat the parameter `a_`, and locals
    are collapsed into `lowerCamelCase_` (leaving `videoreader`, `scores`,
    `ids`, etc. unbound). Comments describe the apparent intent — verify
    against the canonical pipeline before relying on behavior.
    """

    def __init__( self , *a_ , **a_ ):
        super().__init__(*a_ , **a_ )
        # decord is a hard requirement for frame decoding.
        requires_backends(self , "decord" )
        self.check_model_type(a_ )

    def _UpperCamelCase ( self , a_=None , a_=None , a_=None ):
        # Split user kwargs into preprocess / forward / postprocess params.
        lowerCamelCase_ : Dict = {}
        if frame_sampling_rate is not None:
            lowerCamelCase_ : Dict = frame_sampling_rate
        if num_frames is not None:
            lowerCamelCase_ : Any = num_frames
        lowerCamelCase_ : Optional[int] = {}
        if top_k is not None:
            lowerCamelCase_ : Optional[Any] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , a_ , **a_ ):
        return super().__call__(a_ , **a_ )

    def _UpperCamelCase ( self , a_ , a_=None , a_=1 ):
        # Sample `num_frames` evenly spaced frames and run the image processor.
        if num_frames is None:
            lowerCamelCase_ : str = self.model.config.num_frames
        if video.startswith("http://" ) or video.startswith("https://" ):
            lowerCamelCase_ : int = BytesIO(requests.get(a_ ).content )
        lowerCamelCase_ : Optional[int] = VideoReader(a_ )
        videoreader.seek(0 )
        # frame window: [0, num_frames * frame_sampling_rate - 1]
        lowerCamelCase_ : List[str] = 0
        lowerCamelCase_ : str = num_frames * frame_sampling_rate - 1
        # NOTE(review): `np.intaa` is not a NumPy attribute — presumably np.int64.
        lowerCamelCase_ : str = np.linspace(a_ , a_ , num=a_ , dtype=np.intaa )
        lowerCamelCase_ : List[str] = videoreader.get_batch(a_ ).asnumpy()
        lowerCamelCase_ : int = list(a_ )
        lowerCamelCase_ : Union[str, Any] = self.image_processor(a_ , return_tensors=self.framework )
        return model_inputs

    def _UpperCamelCase ( self , a_ ):
        # Forward pass through the model.
        lowerCamelCase_ : int = self.model(**a_ )
        return model_outputs

    def _UpperCamelCase ( self , a_ , a_=5 ):
        # Convert logits to top-k (score, label) pairs.
        if top_k > self.model.config.num_labels:
            lowerCamelCase_ : int = self.model.config.num_labels
        if self.framework == "pt":
            lowerCamelCase_ : Tuple = model_outputs.logits.softmax(-1 )[0]
            lowerCamelCase_ : Tuple = probs.topk(a_ )
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        lowerCamelCase_ : Tuple = scores.tolist()
        lowerCamelCase_ : Union[str, Any] = ids.tolist()
        # NOTE(review): `idalabel` is presumably the config's `id2label` mapping.
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_ , a_ )]
| 714 |
def __magic_name__ ( number , digit_amount ):
    """Return the fractional (decimal) part of `number`.

    The original signature named both parameters `lowerCAmelCase_` — a
    duplicate-argument SyntaxError — while the body already referenced
    `number` and `digit_amount`; those names are restored here.

    Args:
        number: value whose decimal part is isolated (sign is preserved).
        digit_amount: digits to round the fractional part to; a value <= 0
            returns the raw, unrounded fractional part.
    """
    if digit_amount > 0:
        return round(number - int(number) , digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo calls exercising several digit_amount values.
    # NOTE(review): `decimal_isolate` is not defined in this file — the
    # function above is bound to `__magic_name__`; confirm the intended name.
    # Literals such as 35.3_45 use PEP 515 digit separators (== 35.345).
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
| 73 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for LiLT-style models.

    Holds the hyperparameters of the text backbone (vocab/hidden sizes,
    layer counts, dropout) plus the layout-specific settings
    (`channel_shrink_ratio`, `max_2d_position_embeddings`).

    The original `__init__` named every parameter `a_` (a duplicate-argument
    SyntaxError) and bound the values to throwaway locals; the parameter
    names are restored from the right-hand sides of the original assignments
    and the attributes are now set on `self`.
    """

    # NOTE(review): canonically this is `model_type = "lilt"`; the mangled
    # name is kept because it is part of this file's (obfuscated) interface.
    __UpperCAmelCase : Tuple = '''lilt'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Fixture holding the configuration used by the image-processor tests.

    The original `__init__` named every parameter `a_` (a duplicate-argument
    SyntaxError) and bound the values to throwaway locals, so the attributes
    read back by `_UpperCamelCase` were never set; both defects are fixed
    here with the names restored from the original right-hand sides.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size mirrors the processor's own default.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def _UpperCamelCase ( self ):
        """Return the kwargs dict used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor: attribute presence, dict
    round-trips, batching over PIL / NumPy / torch inputs, and an OCR
    integration check against a fixture document.

    NOTE(review): many assertions below reference `a_` and locals collapsed
    into `lowerCamelCase_` (leaving `image_processor`, `image_inputs`,
    `encoding`, `encoded_images`, `ds`, etc. unbound at their use sites) —
    this block appears mechanically renamed; verify against the canonical
    test suite before relying on behavior.
    """

    # Class under test; None (suite skipped) when pytesseract is unavailable.
    __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _UpperCamelCase ( self ):
        # setUp: build the fixture holding default processor kwargs.
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ):
        # Default kwargs for constructing the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ):
        # The processor exposes its configurable attributes.
        lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a_ , "do_resize" ) )
        self.assertTrue(hasattr(a_ , "size" ) )
        self.assertTrue(hasattr(a_ , "apply_ocr" ) )

    def _UpperCamelCase ( self ):
        # from_dict honors both defaults and keyword overrides.
        lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def _UpperCamelCase ( self ):
        pass

    def _UpperCamelCase ( self ):
        # Initialize image_processing
        lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , Image.Image )
        # Test not batched input
        lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , a_ )
        self.assertIsInstance(encoding.boxes , a_ )
        # Test batched
        lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        # Initialize image_processing
        lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray )
        # Test not batched input
        lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        # Initialize image_processing
        lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        # Integration test against a fixture document, with OCR on and off.
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , a_ )
        self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
        lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__magic_name__ = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
__magic_name__ = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
__magic_name__ = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
__magic_name__ = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
__magic_name__ = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    """code_eval metric: executes model-generated candidate programs against
    unit tests in a thread pool and reports pass@k.

    NOTE(review): locals are collapsed into `lowerCamelCase_`, so names used
    below (`futures`, `completion_id`, `n_samples`, `results`, `test_program`,
    `total`, `correct`, `ks`, `pass_at_k`) are unbound at their use sites,
    and `executor.submit(a_, *a_)` canonically submits `check_correctness`
    with the args tuple built just above it. Comments describe the apparent
    intent — verify against the canonical metric before relying on behavior.
    """

    def _UpperCamelCase ( self ):
        # Metric metadata and input feature schema.
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" ) ),
                    "references": datasets.Value("string" ),
                } ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )

    def _UpperCamelCase ( self , a_ , a_ , a_=[1, 10, 100] , a_=4 , a_=3.0 ):
        # Refuse to run untrusted code unless explicitly opted in via env var.
        if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows." )
        with ThreadPoolExecutor(max_workers=a_ ) as executor:
            lowerCamelCase_ : List[Any] = []
            lowerCamelCase_ : List[str] = Counter()
            lowerCamelCase_ : str = 0
            lowerCamelCase_ : int = defaultdict(a_ )
            for task_id, (candidates, test_case) in enumerate(zip(a_ , a_ ) ):
                for candidate in candidates:
                    # candidate program = completion + its unit test
                    lowerCamelCase_ : str = candidate + "\n" + test_case
                    lowerCamelCase_ : Tuple = (test_program, timeout, task_id, completion_id[task_id])
                    lowerCamelCase_ : Optional[Any] = executor.submit(a_ , *a_ )
                    futures.append(a_ )
                    completion_id[task_id] += 1
                    n_samples += 1
            # Collect per-task results as (completion_id, result) pairs.
            for future in as_completed(a_ ):
                lowerCamelCase_ : Dict = future.result()
                results[result["task_id"]].append((result["completion_id"], result) )
        # NOTE(review): canonically a tuple unpack `total, correct = [], []`.
        lowerCamelCase_ : Optional[Any] = [], []
        for result in results.values():
            result.sort()
            lowerCamelCase_ : Optional[Any] = [r[1]["passed"] for r in result]
            total.append(len(a_ ) )
            correct.append(sum(a_ ) )
        lowerCamelCase_ : int = np.array(a_ )
        lowerCamelCase_ : List[str] = np.array(a_ )
        lowerCamelCase_ : List[str] = k
        # pass@k is only reported for k values every problem has samples for.
        lowerCamelCase_ : List[Any] = {F"""pass@{k}""": estimate_pass_at_k(a_ , a_ , a_ ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def __magic_name__ ( num_samples , num_correct , k ):
    """Estimate pass@k of each problem and return the estimates in an array.

    Uses the unbiased estimator from the Codex paper:
    pass@k = 1 - C(n - c, k) / C(n, k), computed stably as a product.

    The original signature named all three parameters `lowerCAmelCase_` — a
    duplicate-argument SyntaxError; the canonical names are restored.

    Args:
        num_samples: total candidates generated per problem — either a single
            int shared by every problem or a per-problem sequence.
        num_correct: per-problem counts of candidates that passed.
        k: the k in pass@k.

    Returns:
        np.ndarray of pass@k estimates, one entry per problem.
    """

    def estimator(n , c , k) -> float:
        # If fewer than k candidates could fail, some draw must contain a pass.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1))

    if isinstance(num_samples , int):
        # One shared sample count: repeat it for every problem.
        num_samples_it = itertools.repeat(num_samples , len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n) , int(c) , k) for n, c in zip(num_samples_it , num_correct)])
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for LUKE-style models.

    Holds the text-backbone hyperparameters plus the entity-embedding
    settings (`entity_vocab_size`, `entity_emb_size`,
    `use_entity_aware_attention`).

    The original `__init__` named every parameter `a_` (a duplicate-argument
    SyntaxError) and bound the values to throwaway locals; the parameter
    names are restored from the right-hand sides of the original assignments
    and the attributes are now set on `self`.
    """

    # NOTE(review): canonically this is `model_type = "luke"`; the mangled
    # name is kept because it is part of this file's (obfuscated) interface.
    __UpperCAmelCase : List[Any] = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,
        entity_vocab_size=50_0000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor combining a LayoutLMv3 image processor (optionally running
    OCR to obtain words and normalized boxes) with a LayoutLMv3 tokenizer.

    NOTE(review): locals are collapsed into `lowerCamelCase_`, so names used
    below (`kwargs`, `feature_extractor`, `image_processor`, `tokenizer`,
    `features`, `text`, `encoded_inputs`, `images`, `images_with_overflow`,
    `overflow_to_sample_mapping`) are unbound at their use sites, and several
    `def` signatures repeat the parameter `a_`. Comments describe the
    apparent intent — verify against the canonical processor before relying
    on behavior.
    """

    # Component attribute names and classes this processor is composed of.
    __UpperCAmelCase : List[Any] = ['''image_processor''', '''tokenizer''']
    __UpperCAmelCase : str = '''LayoutLMv3ImageProcessor'''
    __UpperCAmelCase : Tuple = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')

    def __init__( self , a_=None , a_=None , **a_ ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        lowerCamelCase_ : Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , a_ , )
            lowerCamelCase_ : Dict = kwargs.pop("feature_extractor" )
        lowerCamelCase_ : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(a_ , a_ )

    def __call__( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = True , a_ = False , a_ = None , a_ = None , a_ = 0 , a_ = None , a_ = None , a_ = None , a_ = False , a_ = False , a_ = False , a_ = False , a_ = True , a_ = None , **a_ , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        lowerCamelCase_ : Optional[Any] = self.image_processor(images=a_ , return_tensors=a_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(a_ , a_ ):
                lowerCamelCase_ : Tuple = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowerCamelCase_ : Optional[Any] = features["words"]
        lowerCamelCase_ : Optional[Any] = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
        # add pixel values
        lowerCamelCase_ : Any = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            lowerCamelCase_ : str = self.get_overflowing_images(a_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowerCamelCase_ : Union[str, Any] = images
        return encoded_inputs

    def _UpperCamelCase ( self , a_ , a_ ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowerCamelCase_ : Any = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(a_ ) != len(a_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F""" {len(a_ )} and {len(a_ )}""" )
        return images_with_overflow

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Thin wrapper over the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Thin wrapper over the tokenizer's decode.
        return self.tokenizer.decode(*a_ , **a_ )

    @property
    def _UpperCamelCase ( self ):
        # Names of the tensors the model expects.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def _UpperCamelCase ( self ):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
        return self.image_processor_class

    @property
    def _UpperCamelCase ( self ):
        # Deprecated alias for `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
        return self.image_processor
| 717 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
# Module logger; the shard-renaming code below logs through this name.
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame.

    Renamed from the obfuscated `lowerCAmelCase__`: the builder below already
    references it as `SparkConfig`.
    """

    # Optional explicit feature schema; when None the features are inferred.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    """Return a generator function that yields (key, row-dict) examples from `df`,
    visiting Spark partitions in `partition_order`.

    Renamed from the obfuscated `__magic_name__`: `SparkExamplesIterable.__init__`
    calls it as `_generate_iterable_examples`, and the locals below were used
    without ever being assigned.
    """
    import pyspark

    def generate_fn():
        # Tag every row with the id of the partition it lives in.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Keys are "<partition>_<row-in-partition>", unique per example.
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable that streams rows of a Spark DataFrame partition by partition.

    Renamed from the obfuscated `lowerCAmelCase__`: the methods below and the
    builder already construct it as `SparkExamplesIterable`, and the override
    names are restored to the `_BaseExamplesIterable` API so the framework
    actually calls them.
    """

    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        # Shuffle the order in which partitions are visited, not the rows themselves.
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        # `split_shard_indices_by_worker` is inherited from _BaseExamplesIterable.
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        # One shard per Spark partition that this iterable visits.
        return len(self.partition_order)
class lowerCAmelCase__(datasets.DatasetBuilder):
    """Dataset builder that materializes a PySpark DataFrame as an Arrow/Parquet dataset.

    Shards are written in parallel from within the Spark tasks themselves; the
    driver only collects per-task shard statistics and renames the resulting files.
    """

    # Tells `datasets.DatasetBuilder` which config class to instantiate for this builder.
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        # `semanticHash()` fingerprints the DataFrame's query plan, so the cache is
        # keyed on the data being built rather than on this builder instance.
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """Raise if `cache_dir` is not reachable from the Spark workers (multi-node clusters)."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            # Fix: the original called the non-existent `uuid.uuida()`; `uuid.uuid4()`
            # produces the random probe-file suffix.
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        # The whole DataFrame becomes a single TRAIN split.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition `self.df` so each partition stays (approximately) below `max_shard_size` bytes."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        """Write shards from within the Spark tasks and yield (task_id, stats) pairs.

        `fpath` is a path template containing the "TTTTT" (task id) and "SSSSS"
        (shard id) placeholders.
        """
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: flush it, report its stats, start a new one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                # Move shards from the task-local working dir to the final location.
                # (`shutil` is required here — added to the module imports.)
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        """Drive the distributed write, then rename shards to the final -SSSSS-of-NNNNN pattern."""
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
| 73 | 0 |
def lucas_lehmer_test(p):
    """Return True iff the Mersenne number M_p = 2**p - 1 is prime (requires p >= 2).

    Lucas-Lehmer test: s_0 = 4, s_i = (s_{i-1}**2 - 2) mod M_p;
    M_p is prime iff s_{p-2} == 0.

    Renamed from the obfuscated `__magic_name__` to match the calls in the
    `__main__` guard; the locals `s` and `m` were used without being assigned.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        # M_2 = 3 is prime; the recurrence below only applies for p > 2.
        return True
    s = 4
    m = (1 << p) - 1  # Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # 2**7 - 1 = 127 is prime; 2**11 - 1 = 2047 = 23 * 89 is not.
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(1_1))
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax all edges out of `v` for one direction of a bidirectional Dijkstra search.

    Updates `cst_fwd`, `parent` and `queue` in place for this search direction, and
    returns the (possibly improved) `shortest_distance` whenever the frontier of
    this direction meets a node already settled by the opposite direction.

    Renamed from the obfuscated `__magic_name__` (nine parameters all named
    `lowerCAmelCase_` — a SyntaxError) to match the calls in `bidirectional_dij`.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # Frontiers meet: a full source->destination path exists through (v, nxt).
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra: shortest distance from `source` to `destination`.

    `graph_forward` maps node -> [(neighbor, weight), ...]; `graph_backward` is the
    reversed graph. Returns the shortest path distance, or -1 if no path exists.

    Renamed from the obfuscated `__magic_name__`; the original duplicated all four
    parameter names (a SyntaxError) and used locals that were never assigned.
    """
    shortest_path_distance = -1  # returned unchanged when the frontiers never meet
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}       # best-known cost from `source`
    cst_bwd = {destination: 0}  # best-known cost to `destination` (on the reversed graph)
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        # Expand one node in each direction per iteration.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )
        # Standard termination: once the two settled frontiers cost at least the
        # best meeting distance, no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Example forward adjacency list: node -> list of [neighbor, edge_weight].
# NOTE(review): the second assignment below rebinds the same obfuscated name
# `__magic_name__`, shadowing this forward graph — presumably these were two
# distinct constants (forward and backward graphs) in the original file.
__magic_name__ = {
    '''B''': [['''C''', 1]],
    '''C''': [['''D''', 1]],
    '''D''': [['''F''', 1]],
    '''E''': [['''B''', 1], ['''G''', 2]],
    '''F''': [],
    '''G''': [['''F''', 1]],
}
# Reversed (backward) adjacency list of the graph above.
__magic_name__ = {
    '''B''': [['''E''', 1]],
    '''C''': [['''B''', 1]],
    '''D''': [['''C''', 1]],
    '''F''': [['''D''', 1], ['''G''', 1]],
    '''E''': [[None, np.inf]],
    '''G''': [['''E''', 2]],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30):
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    Renamed from the obfuscated `__magic_name__` to match the calls in the
    `__main__` guard; `theta`, `accuracy` and `div` were used without being
    assigned.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms, must be a positive int
    :raises ValueError: on invalid argument types/values
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series converges well.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta, accuracy=30):
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    Renamed from the obfuscated `__magic_name__` to match the calls in the
    `__main__` guard; `theta`, `accuracy` and `div` were used without being
    assigned.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms, must be a positive int
    :raises ValueError: on invalid argument types/values
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series converges well.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    # Run any doctests, then print a few sample evaluations of both series.
    import doctest
    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
# NOTE(review): the next line rebinds the same obfuscated name `__magic_name__`,
# shadowing the logger — these were presumably two distinct module constants
# (a logger and a pretrained-config archive map) in the original file.
__magic_name__ = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> config URL.
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration class for the Salesforce CTRL model.

    Fixes: the base class was the undefined name `__lowerCamelCase`
    (`PretrainedConfig` is imported above); the class attributes must be named
    `model_type` / `keys_to_ignore_at_inference` / `attribute_map` for
    `PretrainedConfig` to use them; and `__init__` duplicated every parameter
    name (a SyntaxError) while assigning to broken obfuscated targets.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        # Parameter names restored positionally from the original defaults.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # feed-forward inner dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 73 | 0 |
# using dfs for finding eulerian path traversal
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCamelCase_ : Any = True, True
lowerCamelCase_ : List[Any] = dfs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return path
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : Optional[int] = -1
for i in range(lowerCAmelCase_):
if i not in graph.keys():
continue
if len(graph[i]) % 2 == 1:
odd_degree_nodes += 1
lowerCamelCase_ : Any = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : str = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
lowerCamelCase_ : Any = check_circuit_or_path(lowerCAmelCase_ , lowerCAmelCase_)
if check == 3:
print("graph is not Eulerian")
print("no path")
return
lowerCamelCase_ : Any = 1
if check == 2:
lowerCamelCase_ : int = odd_node
print("graph has a Euler path")
if check == 1:
print("graph has a Euler cycle")
lowerCamelCase_ : Optional[int] = dfs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
print(lowerCAmelCase_)
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCamelCase_ : str = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCamelCase_ : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCamelCase_ : str = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCamelCase_ : List[Any] = {
1: [],
2: []
# all degree is zero
}
lowerCamelCase_ : Dict = 10
check_euler(lowerCAmelCase_ , lowerCAmelCase_)
check_euler(lowerCAmelCase_ , lowerCAmelCase_)
check_euler(lowerCAmelCase_ , lowerCAmelCase_)
check_euler(lowerCAmelCase_ , lowerCAmelCase_)
check_euler(lowerCAmelCase_ , lowerCAmelCase_)
if __name__ == "__main__":
main()
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Module logger used throughout `main()`.
logger = logging.getLogger(__name__)

# All config classes that support masked-LM heads, and their model-type strings
# (L6535 originally read `MODEL_CONFIG_CLASSES` even though the obfuscation had
# rebound it to `__magic_name__`).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune or train from scratch.

    Renamed from the obfuscated `lowerCAmelCase__`: `main()` parses it via
    `HfArgumentParser((ModelArguments, ...))`. Field names are restored from the
    `model_args.*` attribute reads in `main()`; they were all `__UpperCAmelCase`
    (each shadowing the previous one).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # `--config_overrides` rewrites a freshly-created config, so it conflicts
        # with loading an existing config or checkpoint.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we train and evaluate on.

    Renamed from the obfuscated `lowerCAmelCase__`: `main()` parses it via
    `HfArgumentParser((..., DataTrainingArguments, ...))`. Field names are
    restored from the `data_args.*` attribute reads in `main()`.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Fail fast on unsupported data-file formats.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data to `dataset` as a new column.

    `ref_file` contains one JSON value per (non-blank) line, one per dataset row.

    Renamed from the obfuscated `__magic_name__` to match the calls in `main()`;
    the original duplicated the parameter names (a SyntaxError) and the locals
    `refs`/`dataset_dict` were used without being assigned.

    NOTE(review): the column name "chinese_ref" is what
    `DataCollatorForWholeWordMask` consumes — confirm against the collator.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
lowerCamelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCamelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCamelCase_ : Dict = {}
if data_args.train_file is not None:
lowerCamelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Any = data_args.validation_file
lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
if extension == "txt":
lowerCamelCase_ : List[str] = "text"
lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
lowerCamelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
if model_args.model_name_or_path:
lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
model.resize_token_embeddings(len(lowerCAmelCase_))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
lowerCamelCase_ : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
lowerCamelCase_ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
lowerCamelCase_ : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
lowerCamelCase_ : Dict = model_args.model_name_or_path
else:
lowerCamelCase_ : int = None
lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
lowerCamelCase_ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCamelCase_ : Tuple = trainer.evaluate()
lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
lowerCamelCase_ : Tuple = perplexity
lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def _mp_fn(index):
    """Entry point for torch_xla's ``xla_spawn`` (TPU) launcher.

    Args:
        index: process index supplied by the spawner (unused; each process
            simply runs ``main()``).
    """
    main()


if __name__ == "__main__":
    main()
| 73 | 0 |
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc.

    Args:
        angle: central angle of the arc, in degrees.
        radius: radius of the circle.

    Returns:
        Arc length ``2 * pi * radius * (angle / 360)``.

    >>> from math import isclose, pi
    >>> isclose(arc_length(90, 10), 5 * pi)
    True
    """
    # Fraction of the full circumference subtended by `angle` degrees.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Container for the Karras VE scheduler's mutable values.

    As a flax struct dataclass, instances are immutable; new values are
    produced with ``state.replace(...)`` (see ``set_timesteps``).
    """

    # setable values
    num_inference_steps: Optional[int] = None  # filled in by set_timesteps
    timesteps: Optional[jnp.ndarray] = None  # reversed step indices
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return a fresh state with all fields unset."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of the scheduler's ``step``/``step_correct`` methods.

    Attributes:
        prev_sample: sample for the previous (less noisy) sigma level.
        derivative: d(sample)/d(sigma) from this step; fed back into the
            second-order correction (``step_correct``).
        state: the scheduler state, passed through for functional-style use.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax/JAX stochastic sampler for variance-expanding (VE) diffusion models
    in the style of Karras et al. (churn noise injection + Euler step with an
    optional second-order Heun correction).
    """

    @property
    def has_state(self):
        # Mutable values live in an external KarrasVeSchedulerState object.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # @register_to_config stores every argument on `self.config`;
        # nothing else to initialise here.
        pass

    def create_state(self):
        """Return a fresh, empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        """Return a new state holding the reversed timestep indices and the
        geometric sigma schedule interpolating sigma_max -> sigma_min."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Raise the noise level of `sample` from sigma to sigma_hat by adding
        churn noise; returns ``(sample_hat, sigma_hat)``."""
        # gamma (the churn amount) is only applied inside the [s_min, s_max] band.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        """First-order (Euler) update from sigma_hat down to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        """Second-order (Heun) correction: redo the step with the average of
        the original derivative and the derivative at the predicted point."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Not supported for this scheduler.
        raise NotImplementedError()
| 73 | 0 |
def _print_dist(dist, v):
    """Pretty-print the all-pairs distance matrix; unreachable pairs print INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """Compute all-pairs shortest paths with the Floyd-Warshall algorithm.

    Args:
        graph: v x v adjacency matrix; ``graph[i][j]`` is the edge weight
            from i to j, ``float("inf")`` when there is no edge.
        v: number of vertices.

    Returns:
        Tuple ``(dist, v)`` where ``dist[i][j]`` is the shortest distance
        from i to j. Also prints the matrix as a side effect.
    """
    # Start from a copy of the input so the caller's matrix is not mutated.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    # Adjacency matrix initialised to "no edge" (infinity) everywhere.
    graph = [[float("inf") for i in range(v)] for j in range(v)]

    # Distance from every vertex to itself is zero.
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Fast (tiny-model, CPU) tests for ``StableDiffusionDiffEditPipeline``:
    mask generation, latent inversion (DDIM and DPM-Solver inverse schedulers)
    and optional-component save/load round-tripping.

    NOTE(review): identifiers in this class appear machine-mangled — every
    method is named ``_UpperCamelCase`` (later defs shadow earlier ones) and
    bodies reference names like ``a_``/``unet``/``pipe`` that are never bound.
    Code is left byte-identical; confirm against the upstream diffusers test
    module before relying on it.
    """

    # Pipeline under test and its parameter sets (image replaced by latents).
    __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
    __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    __UpperCAmelCase : List[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCAmelCase : List[str] = frozenset([] )

    # Build tiny seeded UNet/VAE/CLIP components plus DDIM + inverse-DDIM schedulers.
    def _UpperCamelCase ( self ):
        torch.manual_seed(0 )
        lowerCamelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
        lowerCamelCase_ : str = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
        lowerCamelCase_ : Dict = DDIMInverseScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
        torch.manual_seed(0 )
        lowerCamelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
        lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCamelCase_ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    # Deterministic call kwargs using a precomputed mask + image latents.
    def _UpperCamelCase ( self , a_ , a_=0 ):
        lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Tuple = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    # Deterministic kwargs for generate_mask (source/target prompt pair).
    def _UpperCamelCase ( self , a_ , a_=0 ):
        lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : int = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    # Deterministic kwargs for pipe.invert (latent inversion of a dummy image).
    def _UpperCamelCase ( self , a_ , a_=0 ):
        lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Union[str, Any] = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    # Save/load round-trip with all optional components set to None:
    # outputs before and after reload must agree within 1e-4.
    def _UpperCamelCase ( self ):
        if not hasattr(self.pipeline_class , "_optional_components" ):
            return
        lowerCamelCase_ : List[Any] = self.get_dummy_components()
        lowerCamelCase_ : int = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a_ , a_ , a_ )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : int = pipe(**a_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_ )
            lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
        pipe_loaded.to(a_ )
        pipe_loaded.set_progress_bar_config(disable=a_ )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
        lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
        self.assertLess(a_ , 1E-4 )

    # generate_mask on the tiny pipeline: shape (1, 16, 16) and an all-zero slice.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
        lowerCamelCase_ : int = pipe.generate_mask(**a_ )
        lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        lowerCamelCase_ : List[str] = np.array([0] * 9 )
        lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    # pipe.invert with the default DDIM inverse scheduler: check a pixel slice.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
        lowerCamelCase_ : str = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Dict = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )

    # Batched vs single inference must agree within a looser 5e-3 tolerance.
    def _UpperCamelCase ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )

    # Same inversion check but with DPM-Solver multistep (+ inverse) schedulers.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
        lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : str = pipe.invert(**a_ ).images
        lowerCamelCase_ : int = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Union[str, Any] = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for DiffEdit on ``stable-diffusion-2-1``:
    generate a mask, invert the fruit image, and inpaint it into pears,
    comparing against a reference image within a 0.5 tolerance.

    NOTE(review): method names appear machine-mangled (all ``_UpperCamelCase``,
    so later defs shadow earlier ones) and bodies reference unbound names like
    ``a_``/``pipe``; code left byte-identical — confirm against upstream.
    """

    # Per-test cleanup: free GPU memory between runs.
    def _UpperCamelCase ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Class-level fixture: download the source "fruit" image once, at 768x768.
    @classmethod
    def _UpperCamelCase ( cls ):
        lowerCamelCase_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
        lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
        lowerCamelCase_ : List[Any] = raw_image

    # Full DiffEdit round trip with DDIM (+ inverse) schedulers.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : str = "a bowl of fruit"
        lowerCamelCase_ : Optional[int] = "a bowl of pears"
        lowerCamelCase_ : List[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
        lowerCamelCase_ : List[str] = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1

    # Same round trip with DPM-Solver multistep (+ inverse) schedulers, 25 steps.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
        lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = "a bowl of fruit"
        lowerCamelCase_ : Dict = "a bowl of pears"
        lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
        lowerCamelCase_ : Any = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps each submodule to the public names it provides.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

# The image processor needs vision deps (PIL); register it only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

# Modeling code needs torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors _import_structure above.
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for transformers' backbone utilities: aligning out_features
    with out_indices, validating the feature/index pair, and the BackboneMixin
    property behaviour.

    NOTE(review): identifiers appear machine-mangled — every method is named
    ``_UpperCamelCase``, assertions reference an unbound ``a_``, and the
    annotated tuple-unpack targets (``x ,y : Tuple = ...``) are not valid
    Python. Code left byte-identical; confirm against the upstream test file.
    """

    # get_aligned_output_features_output_indices: defaults, explicit features,
    # explicit indices, and negative indices.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )

    # verify_out_features_out_indices: each invalid combination must raise;
    # the final call with valid inputs must pass.
    def _UpperCamelCase ( self ):
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )

    # BackboneMixin: out_features/out_indices stay mutually consistent when
    # either side is updated.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 73 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for generation's ``DisjunctiveConstraint``: input validation,
    rejection of nested subset constraints, and stepwise ``update``/``reset``
    progress tracking.

    NOTE(review): identifiers appear machine-mangled — all methods are named
    ``_UpperCamelCase`` and assertions reference an unbound ``a_``; code left
    byte-identical. Confirm against the upstream transformers test file.
    """

    # Constructor accepts lists of ints only; tensors must raise.
    def _UpperCamelCase ( self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        lowerCamelCase_ : List[Any] = [[1, 2, 4], [1, 2, 3, 4]]
        lowerCamelCase_ : Dict = DisjunctiveConstraint(a_ )
        self.assertTrue(isinstance(dc.token_ids , a_ ) )
        with self.assertRaises(a_ ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(a_ ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    # One branch being a strict prefix of another is ambiguous and must raise.
    def _UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        lowerCamelCase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(a_ ):
            DisjunctiveConstraint(a_ ) # fails here

    # update() walks one branch token by token until completion.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
        lowerCamelCase_ : Optional[Any] = DisjunctiveConstraint(a_ )
        lowerCamelCase_ : Dict = dc.update(1 )
        lowerCamelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
        self.assertTrue(a_ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        lowerCamelCase_ : List[str] = dc.update(2 )
        lowerCamelCase_ : List[str] = stepped is True and completed is False and reset is False
        self.assertTrue(a_ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        lowerCamelCase_ : str = dc.update(3 )
        lowerCamelCase_ : Union[str, Any] = stepped is True and completed is True and reset is False
        self.assertTrue(a_ )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    # Longer branch set plus reset(): remaining() counts down after each update.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        lowerCamelCase_ : Union[str, Any] = DisjunctiveConstraint(a_ )
        lowerCamelCase_ : str = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        lowerCamelCase_ : str = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        lowerCamelCase_ : Union[str, Any] = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        lowerCamelCase_ : str = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        lowerCamelCase_ : Dict = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        lowerCamelCase_ : List[Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        lowerCamelCase_ : Dict = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
    """End-to-end tests for the ``accelerate launch`` / ``accelerate test`` CLI:
    launches the bundled test script with no config, with every YAML config in
    tests/test_configs, and via the built-in self-test command.

    NOTE(review): class attributes and methods are machine-renamed
    (``__UpperCAmelCase``/``_UpperCamelCase``), yet bodies read the original
    names (``mod_file``, ``config_folder``, ``cls.config_path``, ``a_``) —
    code left byte-identical; confirm against the upstream accelerate tests.
    """

    # Path to the packaged test script and the default-config locations that
    # the setUp/tearDown hooks move out of the way during the run.
    __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
    __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    __UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
    __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
    __UpperCAmelCase : int = '''default_config.yaml'''
    __UpperCAmelCase : Tuple = config_folder / config_file
    __UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
    __UpperCAmelCase : int = Path('''tests/test_configs''' )

    # Stash any existing default config so these tests run config-free.
    @classmethod
    def _UpperCamelCase ( cls ):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    # Restore the stashed default config afterwards.
    @classmethod
    def _UpperCamelCase ( cls ):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    # Launch with no config file; adds --multi_gpu when >1 CUDA device exists.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    # Launch once per YAML config under tests/test_configs.
    def _UpperCamelCase ( self ):
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=a_ ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )

    # `accelerate test` runs the library's built-in sanity-check script.
    def _UpperCamelCase ( self ):
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__(unittest.TestCase):
    """Tests for the `accelerate tpu-config` CLI command.

    Each test runs the command with `--debug` (so the gcloud call is only
    printed, not executed) and asserts the rendered `gcloud compute tpus`
    command line.  The obfuscated original assigned every attribute to the
    same name `__UpperCAmelCase` and every method to `_UpperCamelCase`, so
    only the last of each survived; names are restored here.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        # All parameters passed on the command line, no config file.
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        # Old-style (0.12.0) config file plus explicit CLI overrides.
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        # Commands come entirely from the config file.
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        # A CLI --command overrides the commands in the config file.
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        # Multiple --command flags are chained with ';'.
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        # Commands read from a shell script via --command_file.
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        # --install_accelerate prepends a `pip install accelerate -U` step.
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        # A pinned --accelerate_version installs that exact release.
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 73 | 0 |
def __magic_name__(lowerCAmelCase_=50000000):
    """Project Euler 87: count the numbers below `lowerCAmelCase_` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.

    Fixes over the broken original: the body referenced an undefined name
    `limit` instead of the parameter, the sieve stepped by the limit instead
    of by the prime `p`, and the early `break`s iterated an *unordered* set
    (which makes breaking on a size threshold incorrect).
    """
    limit = lowerCAmelCase_
    ret = set()
    # Largest prime whose square can still fit (2**3 + 2**4 = 24 is the
    # minimum contribution of the cube and fourth-power terms).
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Sort so that `break` on a size threshold is valid.
    ordered_primes = sorted(primes)
    for prime1 in ordered_primes:
        square = prime1 * prime1
        for prime2 in ordered_primes:
            cube = prime2 * prime2 * prime2
            # 16 == 2**4 is the smallest possible fourth-power term.
            if square + cube >= limit - 16:
                break
            for prime3 in ordered_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)


if __name__ == "__main__":
    print(f"{__magic_name__() = }")
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline (VQ-VAE + UNet + DDIM scheduler).

    The obfuscated original declared every `__call__` parameter as `a_`
    (a duplicate-argument SyntaxError) while the body read `batch_size`,
    `eta`, `output_type`, ... — the intended parameter names are restored.
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Run `num_inference_steps` denoising steps and decode the latents.

        Returns an `ImagePipelineOutput` (or a 1-tuple when
        `return_dict=False`) holding `batch_size` generated images.
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
# Lazy-import table for the herbert tokenizers.  The obfuscated original
# assigned both the dict and the fast-tokenizer list to the same name
# (`__magic_name__`), clobbering the dict, and then passed an undefined
# `_import_structure` to `_LazyModule`.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The fast tokenizer is only importable when `tokenizers` is installed.
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
import re
def __magic_name__(lowerCAmelCase_):
    """Return the complementary DNA strand (A<->T, C<->G) of `lowerCAmelCase_`.

    Raises:
        ValueError: if the strand contains any character other than A, T, C, G.

    The broken original referenced an undefined name `dna` instead of the
    parameter.
    """
    # Every character must be one of A/T/C/G, otherwise the strand is invalid.
    if len(re.findall("[ATCG]", lowerCAmelCase_)) != len(lowerCAmelCase_):
        raise ValueError("Invalid Strand")
    return lowerCAmelCase_.translate(lowerCAmelCase_.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
def _A(graph):
    """Print the number of vertices on the longest path in a DAG.

    `graph` is an adjacency list mapping vertex -> list of successors, with
    vertices numbered 0..len(graph)-1.  Uses Kahn's topological ordering;
    `long_dist[v]` is the length (in vertices) of the longest path ending at v.

    The broken original read an undefined name `graph`, dropped the
    `long_dist[x]` / `queue.append(i)` assignment targets, and called an
    undefined `longest_distance` at module level.
    """
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all source vertices.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
__magic_name__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
_A(__magic_name__)
| 705 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude, angle, radian_mode=False):
    """Convert a force given in polar form to its [x, y] components.

    `angle` is interpreted as degrees unless `radian_mode` is True.
    (The broken original imported a non-existent numpy name `floataa`,
    defined both functions under the same name `__magic_name__`, and lost
    the parameter names its body relied on.)
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """Return True when the net moment of `forces` applied at `location` is
    (numerically) zero, i.e. the system is in rotational static equilibrium.

    `forces` and `location` are (n, 2) arrays; the moment of each force is
    the z-component of location x force.
    """
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


# Test to check if it works
forces = array(
    [
        polar_force(718.4, 180 - 30),
        polar_force(879.54, 45),
        polar_force(100, -90),
    ]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)

# Problem 1 in image_data/2D_problems.jpg
forces = array(
    [
        polar_force(30 * 9.81, 15),
        polar_force(215, 180 - 45),
        polar_force(264, 90 - 30),
    ]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)

# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """Element-wise logistic sigmoid of a numpy array of logits.

    Restored from the broken original, whose body read an undefined
    `_outputs`; the pipeline class below calls this as `sigmoid`.
    """
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    """Numerically stable softmax over the last axis of a numpy array.

    Subtracting the row-wise max before exponentiating prevents overflow.
    Restored from the broken original, whose body read undefined names
    (`_outputs`, `maxes`, `shifted_exp`); the pipeline class below calls
    this as `softmax`.
    """
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    """Post-processing functions a text-classification pipeline can apply to logits.

    Restored: the obfuscated original named all three members
    `__UpperCAmelCase` (only the last survived) and the class itself was
    anonymous, while later code reads `ClassificationFunction.SIGMOID`,
    `.SOFTMAX` and `.NONE`.
    """

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class lowerCAmelCase__(Pipeline):
    """Sequence-classification pipeline.

    Restored from a broken obfuscation: all five methods were named
    `_UpperCamelCase` (so only the last survived) and parameter names the
    bodies relied on (`top_k`, `return_all_scores`, `inputs`, ...) were lost.
    Method names follow the `Pipeline` contract (`_sanitize_parameters`,
    `preprocess`, `_forward`, `postprocess`), which the base class invokes.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            # Explicit `top_k` switches off the legacy single-dict output format.
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__(ProcessorMixin):
    """Processor bundling a CLAP feature extractor with a RoBERTa tokenizer.

    Restored from a broken obfuscation: the `ProcessorMixin` class attributes
    and the `batch_decode`/`decode`/`model_input_names` members all collided
    on the same names, and `__call__` declared duplicate `a_` parameters
    (a SyntaxError) while its body read `text`/`audios`/`encoding`/... .
    """

    # Names `ProcessorMixin` uses to instantiate the two sub-components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Returns a `BatchEncoding`; when both modalities are given the audio
        `input_features` are merged into the text encoding.

        Raises:
            ValueError: if neither `text` nor `audios` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, tokenizer first, de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 73 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Regex matching English articles, used by `normalize_answer` below.
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
# Parsed command-line options; set in the __main__ block by `parse_args()`.
OPTS = None
def parse_args():
    """Build and parse the CLI arguments of the SQuAD 2.0 evaluation script.

    Prints help and exits with status 1 when invoked with no arguments.
    (The broken original used an undefined `lowerCAmelCase_` as the
    `type`/`default` of two options.)
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id in `dataset` to True iff it has a gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Return the normalized whitespace tokens of `s` ([] for empty/None)."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Exact-match score (0/1) between normalized gold and predicted answers."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 score between a gold answer and a prediction.

    When either side tokenizes to nothing (e.g. the empty "no-answer"
    string), F1 is 1 if both are empty and 0 otherwise.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute un-thresholded exact-match and F1 scores per question id.

    Unanswerable questions use the empty string as their only gold answer;
    missing predictions are reported and skipped.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out scores where the model's no-answer probability exceeds the
    threshold: such questions are treated as predicting "", scoring 1 only
    when the question truly has no answer."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into percentage metrics.

    When `qid_list` is given, only those question ids are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from `new_eval` into `main_eval` under `prefix`_key."""
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision-recall step curve with matplotlib and save it.

    NOTE(review): relies on `plt` being imported in the __main__ block
    (matplotlib with the Agg backend) — only callable when --out-image-dir
    is set.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer-probability threshold and compute average precision.

    Questions are processed in increasing no-answer probability; a new PR
    point is emitted whenever the probability changes.  Returns
    {"ap": average precision as a percentage}; optionally plots the curve.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR curves (exact, F1, oracle) and merge their AP into `main_eval`.

    No-op when there are no answerable questions.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: score 1 exactly for answerable questions.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a normalized histogram of no-answer probabilities for `qid_list`.

    NOTE(review): relies on `plt` being imported in the __main__ block.
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bin heights so they sum to 1.
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer-probability threshold maximizing total score.

    Starting from the score obtained by answering "" everywhere
    (= number of unanswerable questions), walk questions in increasing
    no-answer probability, adding each question's score delta.  Returns
    (best score as a percentage, best threshold).
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                # Non-empty prediction on an unanswerable question loses a point.
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in `main_eval`."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Entry point: load data/predictions, score, and emit the metrics JSON.

    Reads file paths and options from the module-level `OPTS` (set by
    `parse_args()` in the __main__ block).
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without a no-answer file, treat every question as answered.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # `OPTS` is the module-level options object read by `main()` and the
    # plotting helpers (the broken original assigned it to `__magic_name__`).
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily, with the non-interactive Agg backend,
        # only when plots were requested.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 707 |
def is_pangram(input_str="The quick brown fox jumps over the lazy dog"):
    """Return True when `input_str` contains every letter a-z at least once.

    (Restored name: the benchmark below imports `is_pangram` by name.)
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str="The quick brown fox jumps over the lazy dog"):
    """Pangram check via a 26-slot boolean flag array.

    The broken original dropped the flag-index assignment targets, leaving
    both branches setting the same throwaway variable.
    """
    flags = [False] * 26
    for char in input_str:
        if char.islower():
            flags[ord(char) - ord("a")] = True
        elif char.isupper():
            flags[ord(char) - ord("A")] = True
    return all(flags)
def is_pangram_fastest(input_str="The quick brown fox jumps over the lazy dog"):
    """Pangram check via a single set comprehension over alphabetic chars."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark():
    """Time the three pangram implementations with `timeit` and print results.

    (Restored name: the __main__ block calls `benchmark()`.)
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 73 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__UpperCAmelCase : Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _UpperCamelCase ( self , a_ , a_ , a_ ):
lowerCamelCase_ : Dict = AudioClassificationPipeline(model=a_ , feature_extractor=a_ )
# test with a raw waveform
lowerCamelCase_ : List[str] = np.zeros((3_4000,) )
lowerCamelCase_ : Tuple = np.zeros((1_4000,) )
return audio_classifier, [audioa, audio]
def _UpperCamelCase ( self , a_ , a_ ):
lowerCamelCase_ : Tuple = examples
lowerCamelCase_ : List[Any] = audio_classifier(a_ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a_ , [
{"score": ANY(a_ ), "label": ANY(a_ )},
{"score": ANY(a_ ), "label": ANY(a_ )},
] , )
lowerCamelCase_ : Tuple = audio_classifier(a_ , top_k=1 )
self.assertEqual(
a_ , [
{"score": ANY(a_ ), "label": ANY(a_ )},
] , )
self.run_torchaudio(a_ )
@require_torchaudio
def _UpperCamelCase ( self , a_ ):
import datasets
# test with a local file
lowerCamelCase_ : Union[str, Any] = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
lowerCamelCase_ : Optional[int] = dataset[0]["audio"]["array"]
lowerCamelCase_ : Dict = audio_classifier(a_ )
self.assertEqual(
a_ , [
{"score": ANY(a_ ), "label": ANY(a_ )},
{"score": ANY(a_ ), "label": ANY(a_ )},
] , )
@require_torch
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = "anton-l/wav2vec2-random-tiny-classifier"
lowerCamelCase_ : Optional[int] = pipeline("audio-classification" , model=a_ )
lowerCamelCase_ : Dict = np.ones((8000,) )
lowerCamelCase_ : Tuple = audio_classifier(a_ , top_k=4 )
lowerCamelCase_ : Union[str, Any] = [
{"score": 0.08_42, "label": "no"},
{"score": 0.08_38, "label": "up"},
{"score": 0.08_37, "label": "go"},
{"score": 0.08_34, "label": "right"},
]
lowerCamelCase_ : Any = [
{"score": 0.08_45, "label": "stop"},
{"score": 0.08_44, "label": "on"},
{"score": 0.08_41, "label": "right"},
{"score": 0.08_34, "label": "left"},
]
self.assertIn(nested_simplify(a_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowerCamelCase_ : str = {"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
lowerCamelCase_ : Dict = audio_classifier(a_ , top_k=4 )
self.assertIn(nested_simplify(a_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _UpperCamelCase ( self ):
    # Full integration test: run the SUPERB keyword-spotting wav2vec2 model
    # on a real sample and pin the top-4 predictions to 3 decimals.
    import datasets
    # NOTE(review): ``dataset`` / ``audio_classifier`` below are unbound
    # (mangled names); confirm against the upstream test.
    lowerCamelCase_ : Any = "superb/wav2vec2-base-superb-ks"
    lowerCamelCase_ : List[str] = pipeline("audio-classification" , model=a_ )
    lowerCamelCase_ : int = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
    lowerCamelCase_ : Dict = np.array(dataset[3]["speech"] , dtype=np.floataa )
    lowerCamelCase_ : Any = audio_classifier(a_ , top_k=4 )
    self.assertEqual(
        nested_simplify(a_ , decimals=3 ) , [
            {"score": 0.9_81, "label": "go"},
            {"score": 0.0_07, "label": "up"},
            {"score": 0.0_06, "label": "_unknown_"},
            {"score": 0.0_01, "label": "down"},
        ] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
def _UpperCamelCase ( self ):
    # Placeholder: the TF backend has no audio-classification pipeline.
    pass
| 708 |
# Conversion factors to joules for each supported energy unit.
__magic_name__ = {
    "joule": 1.0,
    "kilojoule": 1_0_0_0,
    "megajoule": 1_0_0_0_0_0_0,
    "gigajoule": 1_0_0_0_0_0_0_0_0_0,
    "wattsecond": 1.0,
    "watthour": 3_6_0_0,
    "kilowatthour": 3_6_0_0_0_0_0,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_1_8_6.8,
    "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
    "electronvolt": 1.602_176_634E-19,
    "britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
    "footpound": 1.35_58_18,
}
# Fix: the conversion function below looks this table up under the name
# ENERGY_CONVERSION, which was never bound — alias it here (backward
# compatible; the mangled name stays bound as well).
ENERGY_CONVERSION = __magic_name__
def __magic_name__ ( from_type , to_type , value ):
    """Convert ``value`` from one energy unit to another.

    Both unit names must be keys of ``ENERGY_CONVERSION`` (factors to
    joules); the result is ``value`` expressed in ``to_type`` units.

    Raises:
        ValueError: if either unit name is not a known unit.
    """
    # Fix: the parameter list previously repeated one mangled name three
    # times (a SyntaxError) while the body read from_type/to_type/value;
    # the error message also joined the wrong object instead of the table's
    # unit names.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        lowerCamelCase_ : List[Any] = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(lowerCamelCase_)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 73 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __magic_name__ ( config , input_ids , attention_mask=None , head_mask=None ):
    '''Build the kwargs dict for a TF-OPT forward pass, deriving an
    attention mask from the config's pad token when none is given.

    ``head_mask`` is accepted for signature compatibility but unused.
    '''
    # Fix: the signature previously repeated one mangled parameter name four
    # times (a SyntaxError) and the computed mask was dropped into an unused
    # local instead of rebinding ``attention_mask``.
    if attention_mask is None:
        # NOTE(review): ``tf.inta`` looks like a mangled dtype name (likely
        # tf.int8 upstream) — confirm; kept for consistency with the rest of
        # this file.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id) , tf.inta)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase__ :
    """Model tester for a tiny TF-OPT: builds a small config plus inputs and
    checks that cached (past_key_values) decoding matches a full forward
    pass.

    NOTE(review): parameters and locals below are machine-mangled (repeated
    ``a_`` parameters, ``lowerCamelCase_`` rebinding, reads of unbound names
    such as ``input_ids`` / ``model``) — confirm against the upstream
    TFOPTModelTester before relying on details.
    """

    # Config class and per-test config overrides used by the harness.
    __UpperCAmelCase : Optional[int] = OPTConfig
    __UpperCAmelCase : Optional[int] = {}
    __UpperCAmelCase : Any = '''gelu'''

    def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=False , a_=99 , a_=16 , a_=2 , a_=4 , a_=4 , a_="gelu" , a_=0.1 , a_=0.1 , a_=20 , a_=2 , a_=1 , a_=0 , a_=16 , a_=16 , ):
        # Store the tiny-model hyperparameters used by the tests.
        lowerCamelCase_ : List[str] = parent
        lowerCamelCase_ : Union[str, Any] = batch_size
        lowerCamelCase_ : str = seq_length
        lowerCamelCase_ : str = is_training
        lowerCamelCase_ : int = use_labels
        lowerCamelCase_ : Dict = vocab_size
        lowerCamelCase_ : str = hidden_size
        lowerCamelCase_ : List[str] = num_hidden_layers
        lowerCamelCase_ : str = num_attention_heads
        lowerCamelCase_ : List[Any] = intermediate_size
        lowerCamelCase_ : Optional[Any] = hidden_act
        lowerCamelCase_ : Tuple = hidden_dropout_prob
        lowerCamelCase_ : int = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : Tuple = eos_token_id
        lowerCamelCase_ : Union[str, Any] = pad_token_id
        lowerCamelCase_ : Union[str, Any] = bos_token_id
        lowerCamelCase_ : Optional[int] = embed_dim
        lowerCamelCase_ : Dict = word_embed_proj_dim
        lowerCamelCase_ : List[str] = False

    def _UpperCamelCase ( self ):
        # Build (config, inputs_dict) for a tiny OPT model: random ids with a
        # trailing eos column, then the matching attention mask.
        lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowerCamelCase_ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowerCamelCase_ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowerCamelCase_ : Optional[Any] = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=a_ , **self.config_updates , )
        lowerCamelCase_ : Union[str, Any] = prepare_opt_inputs_dict(a_ , a_ )
        return config, inputs_dict

    def _UpperCamelCase ( self , a_ , a_ ):
        # Verify that decoding with past_key_values reproduces the no-cache
        # forward pass on a random slice of the output.
        lowerCamelCase_ : Dict = TFOPTModel(config=a_ )
        lowerCamelCase_ : int = inputs_dict["input_ids"]
        lowerCamelCase_ : List[Any] = input_ids[:1, :]
        lowerCamelCase_ : Optional[Any] = inputs_dict["attention_mask"][:1, :]
        lowerCamelCase_ : Optional[Any] = 1
        # first forward pass
        lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ , use_cache=a_ )
        lowerCamelCase_ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        lowerCamelCase_ : Dict = tf.concat([input_ids, next_tokens] , axis=-1 )
        lowerCamelCase_ : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        lowerCamelCase_ : Optional[int] = model(a_ , attention_mask=a_ )[0]
        lowerCamelCase_ : int = model(a_ , attention_mask=a_ , past_key_values=a_ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        lowerCamelCase_ : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        lowerCamelCase_ : Dict = output_from_no_past[:, -3:, random_slice_idx]
        lowerCamelCase_ : List[Any] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(a_ , a_ , rtol=1E-3 )
@require_tf
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Common-test harness for the TF-OPT model classes: config checks, the
    decoder-cache equivalence test, and token-embedding resizing.

    NOTE(review): locals below are machine-mangled (``lowerCamelCase_``
    rebinding, ``a_`` placeholders, reads of unbound names such as
    ``config`` / ``model``) — confirm against the upstream test class.
    """

    # Model classes under test (empty tuples when TF is unavailable).
    __UpperCAmelCase : Tuple = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    __UpperCAmelCase : Any = (TFOPTForCausalLM,) if is_tf_available() else ()
    __UpperCAmelCase : str = (
        {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
    )
    __UpperCAmelCase : str = False
    __UpperCAmelCase : int = False
    __UpperCAmelCase : str = False
    __UpperCAmelCase : Optional[int] = 10

    def _UpperCamelCase ( self ):
        # Set up the model tester and the shared config tester.
        lowerCamelCase_ : Optional[Any] = TFOPTModelTester(self )
        lowerCamelCase_ : List[str] = ConfigTester(self , config_class=a_ )

    def _UpperCamelCase ( self ):
        # Exercise the generic configuration checks.
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        # Delegate the past-key-values equivalence check to the tester.
        lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*a_ )

    def _UpperCamelCase ( self ):
        # Resize token embeddings up and down; check the new shape and that
        # the overlapping weights are untouched.
        lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(a_ , a_ ):
            # Return the embedding weight, building the model first if the
            # layer has not been materialised yet.
            if hasattr(a_ , "weight" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(a_ , "weight" ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                lowerCamelCase_ : Tuple = model_class(config=a_ )
                lowerCamelCase_ : Optional[Any] = _get_word_embedding_weight(a_ , model.get_input_embeddings() )
                lowerCamelCase_ : str = _get_word_embedding_weight(a_ , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(a_ )
                lowerCamelCase_ : Tuple = _get_word_embedding_weight(a_ , model.get_input_embeddings() )
                lowerCamelCase_ : int = _get_word_embedding_weight(a_ , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                lowerCamelCase_ : Union[str, Any] = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , a_ )
                # check that weights remain the same after resizing
                lowerCamelCase_ : Tuple = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        lowerCamelCase_ : List[Any] = False
                self.assertTrue(a_ )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , a_ )
                    lowerCamelCase_ : Optional[Any] = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            lowerCamelCase_ : List[str] = False
                    self.assertTrue(a_ )
def __magic_name__ ( lowerCAmelCase_):
    '''Wrap a nested list of token ids in a TF constant tensor.'''
    # NOTE(review): ``tf.intaa`` looks like a mangled dtype name (likely
    # tf.int64 upstream) — confirm.
    return tf.constant(lowerCAmelCase_ , dtype=tf.intaa)
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """Builds a tiny OPT config plus eos-terminated input ids for head tests.

    NOTE(review): locals below are machine-mangled; the returned names
    (``config``, ``input_ids``, ``batch_size``) are unbound as written —
    confirm against the upstream test class.
    """

    # Tiny vocabulary used for the synthetic inputs.
    __UpperCAmelCase : int = 99

    def _UpperCamelCase ( self ):
        # Append an eos column (token id 2) to random ids and return the
        # matching tiny config together with the inputs.
        lowerCamelCase_ : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2
        lowerCamelCase_ : Dict = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        lowerCamelCase_ : str = input_ids.shape[0]
        lowerCamelCase_ : List[Any] = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: OPT-350m hidden states match reference values,
    both eagerly and under XLA compilation.

    NOTE(review): locals are machine-mangled (``a_`` placeholders, unbound
    ``model`` / ``output`` / ``xla_generate``) — confirm against upstream.
    """

    @slow
    def _UpperCamelCase ( self ):
        # Forward a fixed token sequence and spot-check shape and the first
        # 3x3 block of the last hidden state against golden values.
        lowerCamelCase_ : List[str] = TFOPTModel.from_pretrained("facebook/opt-350m" )
        lowerCamelCase_ : Dict = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        lowerCamelCase_ : str = tf.not_equal(a_ , model.config.pad_token_id )
        with tf.GradientTape():
            lowerCamelCase_ : List[Any] = model(input_ids=a_ , attention_mask=a_ ).last_hidden_state
        lowerCamelCase_ : str = (1, 11, 512)
        self.assertEqual(output.shape , a_ )
        lowerCamelCase_ : Optional[int] = tf.constant(
            [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , a_ , atol=4E-3 ) )
        # The XLA-compiled forward pass should agree within a looser tolerance.
        lowerCamelCase_ : Optional[int] = tf.function(a_ , jit_compile=a_ )
        lowerCamelCase_ : int = xla_generate(a_ , a_ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , a_ , atol=4E-2 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: OPT-350m mean logits match Metaseq reference
    values, eagerly and under XLA.

    NOTE(review): locals are machine-mangled (``a_`` placeholders, unbound
    ``model`` / ``tokenizer`` / ``inputs`` / ``xla_generate``) — confirm
    against upstream.
    """

    def _UpperCamelCase ( self ):
        super().setUp()
        # Checkpoint shared by the tests in this class.
        lowerCamelCase_ : Tuple = "facebook/opt-350m"

    def _UpperCamelCase ( self ):
        # Compare per-position mean logits for four prompts against golden
        # values from Metaseq.
        lowerCamelCase_ : Any = TFOPTForCausalLM.from_pretrained(self.path_model )
        lowerCamelCase_ : Any = GPTaTokenizer.from_pretrained(self.path_model )
        lowerCamelCase_ : List[str] = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        lowerCamelCase_ : Optional[int] = tokenizer(a_ , return_tensors="tf" , padding=a_ , add_special_tokens=a_ )
        lowerCamelCase_ : Dict = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        lowerCamelCase_ : Dict = tf.constant(
            [
                [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
                [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
                [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
                [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
            ] )
        self.assertTrue(np.allclose(a_ , a_ , atol=1E-4 ) )
        # XLA-compiled version must reproduce the same means.
        lowerCamelCase_ : Optional[int] = tf.function(a_ , jit_compile=a_ )
        lowerCamelCase_ : Optional[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(a_ , a_ , atol=1E-4 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow generation tests for OPT-125m/350m: greedy completions, batched
    generation with left padding, and padding-consistency checks.

    NOTE(review): locals are machine-mangled (``a_`` placeholders, unbound
    ``tokenizer`` / ``model`` / ``inputs`` etc.) — confirm against upstream.
    """

    @property
    def _UpperCamelCase ( self ):
        # Prompts shared by the generation tests below.
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def _UpperCamelCase ( self ):
        # OPT-125m: greedy 10-token completions must match the references.
        lowerCamelCase_ : str = "facebook/opt-125m"
        lowerCamelCase_ : Dict = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        lowerCamelCase_ : List[str] = []
        lowerCamelCase_ : int = GPTaTokenizer.from_pretrained(a_ )
        lowerCamelCase_ : Dict = TFOPTForCausalLM.from_pretrained(a_ )
        for prompt in self.prompts:
            lowerCamelCase_ : str = tokenizer(a_ , return_tensors="tf" ).input_ids
            lowerCamelCase_ : Any = model.generate(a_ , max_length=10 )
            lowerCamelCase_ : str = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
            predicted_outputs += generated_string
        self.assertListEqual(a_ , a_ )

    def _UpperCamelCase ( self ):
        # Batched generation with left padding must match per-sentence
        # generation (with the padding length subtracted from max_length).
        lowerCamelCase_ : List[str] = "facebook/opt-350m"
        lowerCamelCase_ : Any = GPTaTokenizer.from_pretrained(a_ )
        lowerCamelCase_ : List[str] = TFOPTForCausalLM.from_pretrained(a_ )
        lowerCamelCase_ : Dict = "left"
        # use different length sentences to test batching
        lowerCamelCase_ : Tuple = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        lowerCamelCase_ : List[Any] = tokenizer(a_ , return_tensors="tf" , padding=a_ )
        lowerCamelCase_ : Union[str, Any] = inputs["input_ids"]
        lowerCamelCase_ : str = model.generate(input_ids=a_ , attention_mask=inputs["attention_mask"] )
        lowerCamelCase_ : Tuple = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        lowerCamelCase_ : Optional[int] = model.generate(input_ids=a_ )
        lowerCamelCase_ : Optional[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
        lowerCamelCase_ : List[str] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        lowerCamelCase_ : List[str] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings )
        lowerCamelCase_ : List[Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
        lowerCamelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
        lowerCamelCase_ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
        lowerCamelCase_ : Union[str, Any] = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(a_ , a_ )
        self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )

    def _UpperCamelCase ( self ):
        # OPT-350m: greedy 10-token completions must match the references.
        lowerCamelCase_ : List[str] = "facebook/opt-350m"
        lowerCamelCase_ : Dict = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        lowerCamelCase_ : Dict = []
        lowerCamelCase_ : str = GPTaTokenizer.from_pretrained(a_ )
        lowerCamelCase_ : List[Any] = TFOPTForCausalLM.from_pretrained(a_ )
        for prompt in self.prompts:
            lowerCamelCase_ : List[Any] = tokenizer(a_ , return_tensors="tf" ).input_ids
            lowerCamelCase_ : Optional[Any] = model.generate(a_ , max_length=10 )
            lowerCamelCase_ : Optional[Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
            predicted_outputs += generated_string
        self.assertListEqual(a_ , a_ )
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): every constant below rebinds the same mangled name; upstream
# these are distinct bindings (logger, VOCAB_FILES_NAMES, the pretrained
# vocab-file map, positional-embedding sizes, and the XLNet segment ids) —
# confirm against the original module before use.
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
__magic_name__ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based XLNet tokenizer (pads on the left, puts the CLS
    token at the end of the sequence).

    NOTE(review): parameters and locals are machine-mangled (repeated ``a_``
    parameters, ``lowerCamelCase_`` rebinding, reads of unbound names such
    as ``mask_token`` / ``vocab`` / ``inputs``) — confirm details against
    the upstream XLNetTokenizer before relying on them.
    """

    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads sequences on the left.
    __UpperCAmelCase : Optional[int] = '''left'''

    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        # Normalisation flags and the SentencePiece model are kept on the
        # instance; the model is loaded from the given vocab file.
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )

    @property
    def _UpperCamelCase ( self ):
        # Vocabulary size reported by the SentencePiece model.
        return len(self.sp_model )

    def _UpperCamelCase ( self ):
        # Full token->id map including added tokens.
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # Drop the unpicklable SentencePiece processor before pickling.
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state

    def __setstate__( self , a_ ):
        # Restore state and reload the SentencePiece model from disk.
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCamelCase ( self , a_ ):
        # Normalise raw text (whitespace, quote style, accents, case) before
        # SentencePiece tokenisation.
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs

    def _UpperCamelCase ( self , a_ ):
        # Tokenise text; pieces like "9," are re-split so a trailing digit
        # keeps a consistent SentencePiece segmentation.
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces

    def _UpperCamelCase ( self , a_ ):
        # token (str) -> id via the SentencePiece model.
        return self.sp_model.PieceToId(a_ )

    def _UpperCamelCase ( self , a_ ):
        # id -> token (str) via the SentencePiece model.
        return self.sp_model.IdToPiece(a_ )

    def _UpperCamelCase ( self , a_ ):
        # Join pieces back into a plain string.
        # NOTE(review): upstream replaces SPIECE_UNDERLINE with a space here;
        # ``replace(a_ , " ")`` re-uses the token-list argument and looks
        # mangled — confirm.
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string

    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        # Decode ids to text, keeping added tokens as separate chunks so
        # byte-level and added-token text are not mixed.
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Build model inputs: A + sep + cls, or A + sep + B + sep + cls
        # (XLNet puts CLS at the end, unlike BERT).
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        # Special-tokens mask: 1 for the sep/cls positions appended at the end.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Token type ids: segment 0 for A, 1 for B, 2 for the trailing CLS.
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Copy (or serialise) the SentencePiece model file into the target
        # directory and return the written path.
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase__ ( unittest.TestCase ):
    """Robustness tests for tokenizer loading: loading from cache when the
    hub returns HTTP 500, and deprecated single-vocab-file loading.

    NOTE(review): several locals are machine-mangled (the mock's attributes
    and the downloaded paths are assigned to throwaway names, and ``a_`` is
    unbound) — confirm against the upstream test module.
    """

    def _UpperCamelCase ( self ):
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase_ : Optional[int] = mock.Mock()
        lowerCamelCase_ : Union[str, Any] = 500
        lowerCamelCase_ : str = {}
        lowerCamelCase_ : List[str] = HTTPError
        lowerCamelCase_ : Optional[Any] = {}
        # Download this model to make sure it's in the cache.
        lowerCamelCase_ : List[str] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head:
            lowerCamelCase_ : Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def _UpperCamelCase ( self ):
        # Same scenario as above for the fast GPT-2 tokenizer.
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase_ : Union[str, Any] = mock.Mock()
        lowerCamelCase_ : Any = 500
        lowerCamelCase_ : int = {}
        lowerCamelCase_ : int = HTTPError
        lowerCamelCase_ : Any = {}
        # Download this model to make sure it's in the cache.
        lowerCamelCase_ : List[str] = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=a_ ) as mock_head:
            lowerCamelCase_ : Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def _UpperCamelCase ( self ):
        # This test is for deprecated behavior and can be removed in v5
        try:
            lowerCamelCase_ : Union[str, Any] = tempfile.mktemp()
            with open(a_ , "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , a_ )
            lowerCamelCase_ : Dict = AlbertTokenizer.from_pretrained(a_ )
        finally:
            os.remove(a_ )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" , "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , a_ )
            lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 1000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )

    def _UpperCamelCase ( self ):
        # This test is for deprecated behavior and can be removed in v5
        lowerCamelCase_ : Tuple = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
    """Staging-hub push/pull tests: user-namespaced, org-namespaced, and
    custom (trust_remote_code) tokenizers round-trip through the hub.

    NOTE(review): locals are machine-mangled (``a_`` placeholders, unbound
    ``tokenizer`` / ``new_tokenizer``) — confirm against upstream.
    """

    # Tiny WordPiece vocabulary written to a temp vocab.txt by each test.
    __UpperCAmelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    @classmethod
    def _UpperCamelCase ( cls ):
        # Authenticate against the staging hub once for the class.
        lowerCamelCase_ : Tuple = TOKEN
        HfFolder.save_token(a_ )

    @classmethod
    def _UpperCamelCase ( cls ):
        # Best-effort cleanup of the repos created by the tests.
        try:
            delete_repo(token=cls._token , repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass

    def _UpperCamelCase ( self ):
        # Push a user-namespaced tokenizer via push_to_hub, then again via
        # save_pretrained(..., push_to_hub=True); both must round-trip.
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase_ : int = os.path.join(a_ , "vocab.txt" )
            with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowerCamelCase_ : str = BertTokenizer(a_ )
        tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
        lowerCamelCase_ : List[Any] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(a_ , repo_id="test-tokenizer" , push_to_hub=a_ , use_auth_token=self._token )
        lowerCamelCase_ : str = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

    def _UpperCamelCase ( self ):
        # Same round-trip for an org-namespaced repo.
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase_ : List[Any] = os.path.join(a_ , "vocab.txt" )
            with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowerCamelCase_ : Any = BertTokenizer(a_ )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
        lowerCamelCase_ : Dict = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                a_ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=a_ , use_auth_token=self._token )
        lowerCamelCase_ : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

    @require_tokenizers
    def _UpperCamelCase ( self ):
        # Push custom slow and fast tokenizers and reload them with
        # trust_remote_code.
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase_ : Optional[int] = os.path.join(a_ , "vocab.txt" )
            with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowerCamelCase_ : Tuple = CustomTokenizer(a_ )
            # No fast custom tokenizer
            tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        lowerCamelCase_ : int = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=a_ )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase_ : Optional[Any] = os.path.join(a_ , "vocab.txt" )
            with open(a_ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowerCamelCase_ : List[Any] = BertTokenizerFast.from_pretrained(a_ )
            bert_tokenizer.save_pretrained(a_ )
            lowerCamelCase_ : List[str] = CustomTokenizerFast.from_pretrained(a_ )
            tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=a_ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(
            F"""{USER}/test-dynamic-tokenizer""" , use_fast=a_ , trust_remote_code=a_ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for the tokenizer ``Trie``: internal dict layout after
    ``add`` and tokenisation behaviour of ``split`` / ``cut_text``.

    NOTE(review): ``trie`` is assigned to a mangled throwaway local in each
    test — confirm against the upstream test module.
    """

    def _UpperCamelCase ( self ):
        # add() builds a nested char dict; the "" key marks complete entries.
        lowerCamelCase_ : Optional[Any] = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        trie.data
        self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )

    def _UpperCamelCase ( self ):
        # split() leaves unknown text intact and cuts around added tokens,
        # preferring the longest match.
        lowerCamelCase_ : int = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
        trie.add("[CLS]" )
        trie.add("extra_id_1" )
        trie.add("extra_id_100" )
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )

    def _UpperCamelCase ( self ):
        # Single-character tokens split at either end of the text.
        lowerCamelCase_ : Any = Trie()
        trie.add("A" )
        self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
        self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )

    def _UpperCamelCase ( self ):
        # A partially overlapping entry must not break the full-token match.
        lowerCamelCase_ : Tuple = Trie()
        trie.add("TOKEN]" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )

    def _UpperCamelCase ( self ):
        # Single-char entries inside a longer token must not split it.
        lowerCamelCase_ : List[str] = Trie()
        trie.add("A" )
        trie.add("P" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )

    def _UpperCamelCase ( self ):
        # Longest-match wins over shorter overlapping entries.
        lowerCamelCase_ : Optional[Any] = Trie()
        trie.add("AB" )
        trie.add("B" )
        trie.add("C" )
        self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )

    def _UpperCamelCase ( self ):
        # Overlapping entries resolve to the leftmost-longest match.
        lowerCamelCase_ : str = Trie()
        trie.add("ABC" )
        trie.add("B" )
        trie.add("CD" )
        self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )

    def _UpperCamelCase ( self ):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        lowerCamelCase_ : Dict = Trie()
        lowerCamelCase_ : str = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(a_ , ["AB", "C"] )
| 710 |
def __magic_name__ ( lowerCAmelCase_ = 10 , lowerCAmelCase_ = 1000 , lowerCAmelCase_ = True):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return int((number_a + number_a) / 2)
def guess_the_number(lower, higher, to_guess):
    """Binary-search for ``to_guess`` strictly inside (lower, higher),
    printing every intermediate guess and the final answer.

    Raises:
        ValueError: if ``lower > higher`` or ``to_guess`` is not strictly
            between the two bounds.

    The original had all three parameters collapsed to one duplicated name
    (a SyntaxError) and the search-window locals collapsed as well; the name
    ``guess_the_number`` matches the interactive driver below.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        """Compare a candidate guess against the secret value."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        # Integer midpoint of the current window (avg helper inlined so this
        # block is self-contained).
        number = int((last_lowest + last_highest) / 2)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")
def main():
    """Interactive driver: read the bounds and the secret value from stdin,
    then run the guessing game.

    The original bound each input to a throwaway local and then called the
    game with an undefined name; the values are wired through here.
    """
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 73 | 0 |
class Graph:
    """Directed graph stored as an adjacency list: ``{vertex: [neighbours]}``.

    Named ``Graph`` to match the driver script below.  Restored here: the
    DFS helpers actually mark vertices as visited (the original assigned the
    flag to a throwaway local, causing infinite recursion on cycles) and the
    methods have the distinct names the driver calls.
    """

    def __init__(self):
        # Only vertices with at least one outgoing edge appear as keys.
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one ``v -> n1 -> n2`` line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge ``from_vertex -> to_vertex``."""
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Depth-first traversal over all vertices, printing each exactly once.

        NOTE: assumes vertices are labelled 0..n-1 so they can index ``visited``.
        """
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Visit ``start_vertex`` and recurse into every still-unvisited vertex."""
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    # Build the example graph and run both traversals.  The original bound the
    # instance to a throwaway module name and then used the undefined `g`.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print('''DFS:''')
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 711 |
# HF boilerplate: these relative imports only resolve inside the transformers package.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module-level logger for this file.
__magic_name__ = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config.json URL.
# NOTE(review): this rebinds __magic_name__ and clobbers the logger above —
# presumably these were two distinct names before mechanical renaming.
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Stage-wise hyperparameters are given as length-3 lists (one entry per
    stage).  Restored here: the base class is ``PretrainedConfig`` (imported
    above; the previous base name was undefined), the ``__init__`` parameters
    have distinct names (they were all collapsed to one name, a SyntaxError),
    and the values are stored on ``self`` instead of throwaway locals.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length=8):
    """Return a random password of ``length`` characters drawn from letters,
    digits and punctuation.

    Fixed here: the original passed the integer length to ``secrets.choice``
    instead of the character pool (TypeError).  Named ``password_generator``
    to match the driver in ``main``.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl, i):
    """Return a length-``i`` password guaranteed to contain every character of
    ``chars_incl``, topped up with random letters, digits and punctuation and
    then shuffled.

    Restored here: distinct parameter names (the duplicated name was a
    SyntaxError) and the quotient/remainder locals; the drawing helper is
    inlined so this function stands alone.
    """
    # Drawing helper (original called a module-level `random()` helper).
    def draw(seq, k):
        return "".join(secrets.choice(seq) for _ in range(k))

    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + draw(ascii_letters, quotient + remainder)
        + draw(digits, quotient)
        + draw(punctuation, quotient)
    )
    chars_list = list(chars)
    shuffle(chars_list)
    return "".join(chars_list)
# random is a generalised function for letters, characters and numbers
def random(ctbi, i):
    """Return a string of ``i`` characters drawn uniformly from ``ctbi``.

    NOTE: this shadows the stdlib module name ``random`` (only ``shuffle`` is
    imported from it above, so nothing breaks), kept to match the comment.
    Restored: distinct parameter names (the duplicated name was a SyntaxError).
    """
    return "".join(secrets.choice(ctbi) for _ in range(i))
def random_number(ctbi, i):
    """Placeholder: should return a string of ``i`` random digits from ``ctbi``.

    Restored: distinct parameter names (the duplicated name was a SyntaxError).
    """
    pass  # Put your code here...
def random_letters(ctbi, i):
    """Placeholder: should return a string of ``i`` random letters from ``ctbi``.

    Restored: distinct parameter names (the duplicated name was a SyntaxError).
    """
    pass  # Put your code here...
def random_characters(ctbi, i):
    """Placeholder: should return ``i`` random special characters from ``ctbi``.

    Restored: distinct parameter names (the duplicated name was a SyntaxError).
    """
    pass  # Put your code here...
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ = 8):
'''simple docstring'''
if len(lowerCAmelCase_) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase_ : Any = any(char in ascii_uppercase for char in password)
lowerCamelCase_ : Optional[int] = any(char in ascii_lowercase for char in password)
lowerCamelCase_ : Any = any(char in digits for char in password)
lowerCamelCase_ : Any = any(char in punctuation for char in password)
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def main():
    """Interactive driver: read a length and a set of mandatory characters,
    then print both generated passwords.

    Restored: the inputs are bound to real locals and passed through (the
    original read them into throwaway names and called undefined names).
    """
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, max_length), )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
| 712 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import scaffolding: the processor is always importable; the slow and
# fast tokenizers are only registered when their optional backends exist.
# NOTE(review): the assignments below all rebind the same __magic_name__ —
# they look like they should populate `_import_structure` (used at the bottom,
# where it is otherwise undefined). Presumably an artifact of renaming; verify
# against the upstream layoutxlm __init__.
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}

# Slow tokenizer needs sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizer''']

# Fast tokenizer needs the tokenizers backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizerFast''']

if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded the same way.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    # At runtime, replace this module with a lazy proxy that imports on access.
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import logging
import pickle
from collections import Counter


# Configure root logging once for this script.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# NOTE(review): bound to __magic_name__ here but used as `logger` below —
# looks like a renaming artifact; the later references will raise NameError.
__magic_name__ = logging.getLogger(__name__)

if __name__ == "__main__":
    # NOTE(review): same pattern — the parser/args/data/counter/counts values
    # are all rebound to __magic_name__ but read back under their original
    # names (`parser`, `args`, `data`, `counter`, `counts`).
    __magic_name__ = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
    __magic_name__ = parser.parse_args()

    logger.info(f'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        __magic_name__ = pickle.load(fp)

    # Count how often each token id occurs across the whole dataset.
    logger.info('''Counting occurrences for MLM.''')
    __magic_name__ = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # Dense per-id count vector, indexed by token id.
    __magic_name__ = [0] * args.vocab_size
    for k, v in counter.items():
        __magic_name__ = v

    logger.info(f'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__(ProcessorMixin):
    r"""Joint processor wrapping an Encodec feature extractor (audio) and a
    T5 tokenizer (text) behind a single ``__call__``/``batch_decode`` API.

    Restored here: the base class is ``ProcessorMixin`` (imported above; the
    previous base name was undefined), the methods have distinct names (they
    were all collapsed onto one), and the locals that the bodies read back
    (``audio``, ``text``, ``bsz``/``channels``/``seq_len`` …) are bound again.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder-prompt-id lookup to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audio``.

        Returns the tokenizer output, the feature-extractor output, or (when
        both inputs are given) the tokenizer output augmented with the audio
        ``input_values`` / ``padding_mask``.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # Positional audio wins over the keyword form.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode a batch: strip padding from audio values if given, otherwise
        fall through to the tokenizer's ``batch_decode``."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        """Strip padded samples from each generated audio array.

        ``audio_values`` has shape (batch, channels, seq_len); returns a list
        of per-example arrays reshaped back to (channels, -1).
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 73 | 0 |
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


# Emit INFO-level progress logs during checkpoint conversion.
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        rembert_config_file: JSON config describing the architecture.
        pytorch_dump_path: where to save the converted PyTorch weights.

    Restored here: the three parameters had one duplicated name (SyntaxError)
    and the config/model locals were collapsed; the function name matches the
    call in the CLI block below.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point.  Restored: the parser and parsed args are bound to the
    # names the rest of the block reads (`parser`, `args`); previously they
    # were assigned to a throwaway name, raising NameError immediately.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--rembert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained RemBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 714 |
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_) , lowerCAmelCase_)
return number - int(lowerCAmelCase_)
if __name__ == "__main__":
    # Demo calls exercising positive, negative and zero inputs at several
    # rounding precisions. (Underscored float literals like 35.3_45 are valid
    # Python and equal 35.345.)
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
| 73 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet model.

    Restored here: both fields shared one name (the second clobbered the
    first); the field names and the class name match the keyword construction
    at the bottom of the model's ``__call__``.  Base is ``BaseOutput``
    (imported above; the previous base name was undefined).
    """

    # Scaled residual samples from each down block.
    down_block_res_samples: jnp.ndarray
    # Scaled residual sample from the mid block.
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Small convolutional encoder that embeds the conditioning image down to
    the UNet's first feature-map resolution.

    Restored here: ``setup`` assigns ``conv_in``/``blocks``/``conv_out`` on
    ``self`` (previously they were bound to throwaway locals while ``__call__``
    read them from ``self``), and the class/field names match the keyword
    construction site in the model's ``setup``.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Entry convolution at the first channel width.
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # Pairs of (same-resolution conv, stride-2 downsampling conv) walking
        # up the channel widths.
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero-initialised projection so the embedding starts as a no-op.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class lowerCAmelCase__ ( nn.Module, __lowerCamelCase, __lowerCamelCase ):
    """ControlNet-style Flax module: a UNet down/mid path whose residuals are
    projected through zero-initialised convolutions and scaled by a
    conditioning strength.

    NOTE(review): every class field below is declared under the same name
    ``__UpperCAmelCase``, so only the last binding survives, and the method
    bodies bind results to throwaway ``lowerCamelCase_`` locals while later
    lines read the values under other names — this looks like an artifact of
    mechanical renaming; the code is kept verbatim here and only annotated.
    """
    # Presumed original field names, in order (TODO confirm against upstream):
    # sample_size, in_channels, down_block_types, only_cross_attention,
    # block_out_channels, layers_per_block, attention_head_dim,
    # num_attention_heads, cross_attention_dim, dropout,
    # use_linear_projection, dtype, flip_sin_to_cos, freq_shift,
    # controlnet_conditioning_channel_order, conditioning_embedding_out_channels.
    __UpperCAmelCase : int = 32
    __UpperCAmelCase : int = 4
    __UpperCAmelCase : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    __UpperCAmelCase : Union[bool, Tuple[bool]] = False
    __UpperCAmelCase : Tuple[int] = (320, 640, 1280, 1280)
    __UpperCAmelCase : int = 2
    __UpperCAmelCase : Union[int, Tuple[int]] = 8
    __UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
    __UpperCAmelCase : int = 1280
    __UpperCAmelCase : float = 0.0
    __UpperCAmelCase : bool = False
    __UpperCAmelCase : jnp.dtype = jnp.floataa
    __UpperCAmelCase : bool = True
    __UpperCAmelCase : int = 0
    __UpperCAmelCase : str = "rgb"
    __UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)

    def _UpperCamelCase ( self , a_ ):
        # Presumably init_weights(rng): builds zero sample/timestep/context/
        # conditioning tensors and initialises the module parameters.
        # init input tensors
        lowerCamelCase_ : Tuple = (1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase_ : Union[str, Any] = jnp.zeros(a_ , dtype=jnp.floataa )
        lowerCamelCase_ : int = jnp.ones((1,) , dtype=jnp.intaa )
        lowerCamelCase_ : int = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution.
        lowerCamelCase_ : Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase_ : List[str] = jnp.zeros(a_ , dtype=jnp.floataa )
        lowerCamelCase_ : List[str] = jax.random.split(a_ )
        # NOTE(review): reads params_rng/dropout_rng, which the line above
        # does not bind — renaming artifact.
        lowerCamelCase_ : Dict = {"params": params_rng, "dropout": dropout_rng}

        return self.init(a_ , a_ , a_ , a_ , a_ )["params"]

    def _UpperCamelCase ( self ):
        # Presumably setup(): builds conv_in, time embedding, conditioning
        # embedding, the down blocks and their zero-initialised controlnet
        # projections, and the mid block.
        lowerCamelCase_ : Tuple = self.block_out_channels
        # NOTE(review): `block_out_channels` below is not bound by the line
        # above — renaming artifact.
        lowerCamelCase_ : Optional[Any] = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase_ : List[str] = self.num_attention_heads or self.attention_head_dim

        # input
        lowerCamelCase_ : Dict = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        lowerCamelCase_ : Union[str, Any] = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        lowerCamelCase_ : int = FlaxTimestepEmbedding(a_ , dtype=self.dtype )

        lowerCamelCase_ : List[str] = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        # Broadcast scalar per-block settings across all down block types.
        lowerCamelCase_ : Dict = self.only_cross_attention
        if isinstance(a_ , a_ ):
            lowerCamelCase_ : List[Any] = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(a_ , a_ ):
            lowerCamelCase_ : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )

        # down
        lowerCamelCase_ : Tuple = []
        lowerCamelCase_ : List[str] = []

        lowerCamelCase_ : Dict = block_out_channels[0]
        # Zero-initialised 1x1 projection so the controlnet starts as a no-op.
        lowerCamelCase_ : Any = nn.Conv(
            a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(a_ )

        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase_ : List[str] = output_channel
            lowerCamelCase_ : Any = block_out_channels[i]
            lowerCamelCase_ : Optional[Any] = i == len(a_ ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase_ : Dict = FlaxCrossAttnDownBlockaD(
                    in_channels=a_ , out_channels=a_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                lowerCamelCase_ : Dict = FlaxDownBlockaD(
                    in_channels=a_ , out_channels=a_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(a_ )

            # One zero-initialised projection per resnet layer in the block.
            for _ in range(self.layers_per_block ):
                lowerCamelCase_ : Dict = nn.Conv(
                    a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(a_ )

            if not is_final_block:
                # Extra projection for the downsampler output.
                lowerCamelCase_ : Union[str, Any] = nn.Conv(
                    a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(a_ )

        lowerCamelCase_ : List[str] = down_blocks
        lowerCamelCase_ : Union[str, Any] = controlnet_down_blocks

        # mid
        lowerCamelCase_ : Optional[int] = block_out_channels[-1]
        lowerCamelCase_ : List[str] = FlaxUNetMidBlockaDCrossAttn(
            in_channels=a_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )

        lowerCamelCase_ : str = nn.Conv(
            a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , a_ , a_ , a_ , a_ , a_ = 1.0 , a_ = True , a_ = False , ):
        # Presumably (sample, timesteps, encoder_hidden_states,
        # controlnet_cond, conditioning_scale, return_dict, train).
        lowerCamelCase_ : int = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Flip the channel axis so the conditioning image is RGB.
            lowerCamelCase_ : int = jnp.flip(a_ , axis=1 )

        # 1. time
        if not isinstance(a_ , jnp.ndarray ):
            lowerCamelCase_ : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(a_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase_ : str = timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase_ : Any = jnp.expand_dims(a_ , 0 )

        lowerCamelCase_ : Optional[int] = self.time_proj(a_ )
        lowerCamelCase_ : Any = self.time_embedding(a_ )

        # 2. pre-process — NCHW -> NHWC for flax convs.
        lowerCamelCase_ : Union[str, Any] = jnp.transpose(a_ , (0, 2, 3, 1) )
        lowerCamelCase_ : Tuple = self.conv_in(a_ )

        lowerCamelCase_ : Tuple = jnp.transpose(a_ , (0, 2, 3, 1) )
        lowerCamelCase_ : int = self.controlnet_cond_embedding(a_ )
        sample += controlnet_cond

        # 3. down
        lowerCamelCase_ : str = (sample,)
        for down_block in self.down_blocks:
            if isinstance(a_ , a_ ):
                lowerCamelCase_ : Dict = down_block(a_ , a_ , a_ , deterministic=not train )
            else:
                lowerCamelCase_ : Tuple = down_block(a_ , a_ , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        lowerCamelCase_ : List[str] = self.mid_block(a_ , a_ , a_ , deterministic=not train )

        # 5. contronet blocks — project each residual through its
        # zero-initialised conv.
        lowerCamelCase_ : Union[str, Any] = ()
        for down_block_res_sample, controlnet_block in zip(a_ , self.controlnet_down_blocks ):
            lowerCamelCase_ : Optional[int] = controlnet_block(a_ )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        lowerCamelCase_ : List[Any] = controlnet_down_block_res_samples

        lowerCamelCase_ : Any = self.controlnet_mid_block(a_ )

        # 6. scaling — weight all residuals by the conditioning strength.
        lowerCamelCase_ : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=a_ , mid_block_res_sample=a_ )
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the knobs used to build inputs/configs for the LayoutLMv3 image
    processor tests.

    Restored here: distinct parameter names (all were collapsed to one name,
    a SyntaxError), storage on ``self`` (previously throwaway locals, so
    ``prepare_image_processor_dict`` would raise AttributeError), and the
    class name the test class below instantiates.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size when the caller does not override it.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def _UpperCamelCase ( self ):
        """Integration test for LayoutLMv3 image processing with and without OCR.

        With ``apply_OCR`` enabled the processor must return one bounding box per
        recognized word; the expected words/boxes below were produced by
        Tesseract 4.1.1 on a fixture document.

        NOTE(review): ``a_`` is read in several calls below but never bound in
        this method — presumably the image / flag arguments before a mechanical
        rename; confirm against the original test.
        """
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
        # Pixel values are resized/normalized to the processor's default 224x224.
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        # Every recognized word must have a matching bounding box.
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , a_ )
        self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
        lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
        # Without OCR only pixel values are produced; shape is unchanged.
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Abstract base class for CLI subcommands: a static registration hook plus a
    ``run``-style entry point, both of which concrete subcommands must implement.

    NOTE(review): both abstract methods are named ``_UpperCamelCase``, so the
    second ``def`` shadows the first in the class namespace and only one abstract
    method survives — this looks like a mechanical rename; restore the original
    distinct names (presumably ``register_subcommand`` / ``run``) before use.
    """
    @staticmethod
    @abstractmethod
    def _UpperCamelCase ( a_ ):
        # Register this subcommand on the given ArgumentParser (presumably; confirm).
        raise NotImplementedError()
    @abstractmethod
    def _UpperCamelCase ( self ):
        # Execute the subcommand.
        raise NotImplementedError()
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for LUKE models: word/entity vocabulary sizes, transformer
    dimensions, dropout rates and the entity-aware-attention switch.

    NOTE(review): every ``__init__`` parameter is named ``a_`` (duplicate
    argument names are a SyntaxError in Python) while the body reads names such
    as ``vocab_size`` that are never bound — a mechanical rename gone wrong; the
    assignments below record the intended keyword for each parameter.
    """
    __UpperCAmelCase : List[Any] = '''luke'''
    def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ):
        # Forward the special-token ids to the base PretrainedConfig.
        super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
        lowerCamelCase_ : Tuple = vocab_size
        lowerCamelCase_ : Optional[int] = entity_vocab_size
        lowerCamelCase_ : Any = hidden_size
        lowerCamelCase_ : Dict = entity_emb_size
        lowerCamelCase_ : List[Any] = num_hidden_layers
        lowerCamelCase_ : int = num_attention_heads
        lowerCamelCase_ : Union[str, Any] = hidden_act
        lowerCamelCase_ : Tuple = intermediate_size
        lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
        lowerCamelCase_ : Any = attention_probs_dropout_prob
        lowerCamelCase_ : Optional[Any] = max_position_embeddings
        lowerCamelCase_ : str = type_vocab_size
        lowerCamelCase_ : int = initializer_range
        lowerCamelCase_ : List[Any] = layer_norm_eps
        lowerCamelCase_ : Optional[int] = use_entity_aware_attention
        lowerCamelCase_ : str = classifier_dropout
| 73 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Test helper that builds a FlaubertConfig plus random input tensors and runs
    per-head shape/loss checks for every Flaubert model class.

    NOTE(review): throughout this class the ``__init__``/method parameters are all
    named ``a_`` (duplicate argument names are a SyntaxError) while the bodies read
    descriptive names (``parent``, ``batch_size``, ``config`` …) that are never
    bound — a mechanical rename; the annotations in the comments below describe
    the apparent intent, to be confirmed against the original test file.
    All test methods also share the name ``_UpperCamelCase``, so later defs shadow
    earlier ones in the class namespace.
    """
    def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=False , a_=False , a_=False , a_=2 , a_=99 , a_=0 , a_=32 , a_=5 , a_=4 , a_=0.1 , a_=0.1 , a_=512 , a_=12 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_="last" , a_=None , a_=None , ):
        # Hyper-parameters for the dummy model/config used by every check below.
        lowerCamelCase_ : str = parent
        lowerCamelCase_ : List[str] = batch_size
        lowerCamelCase_ : List[Any] = seq_length
        lowerCamelCase_ : int = is_training
        lowerCamelCase_ : Optional[int] = use_input_lengths
        lowerCamelCase_ : Optional[Any] = use_token_type_ids
        lowerCamelCase_ : Dict = use_labels
        lowerCamelCase_ : Dict = gelu_activation
        lowerCamelCase_ : Optional[int] = sinusoidal_embeddings
        lowerCamelCase_ : str = causal
        lowerCamelCase_ : Optional[int] = asm
        lowerCamelCase_ : Dict = n_langs
        lowerCamelCase_ : List[Any] = vocab_size
        lowerCamelCase_ : List[Any] = n_special
        lowerCamelCase_ : int = hidden_size
        lowerCamelCase_ : List[str] = num_hidden_layers
        lowerCamelCase_ : List[Any] = num_attention_heads
        lowerCamelCase_ : Optional[int] = hidden_dropout_prob
        lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : int = type_vocab_size
        lowerCamelCase_ : Optional[int] = type_sequence_label_size
        lowerCamelCase_ : int = initializer_range
        lowerCamelCase_ : Tuple = num_labels
        lowerCamelCase_ : int = num_choices
        lowerCamelCase_ : List[Any] = summary_type
        lowerCamelCase_ : Optional[Any] = use_proj
        lowerCamelCase_ : List[Any] = scope
    def _UpperCamelCase ( self ):
        # Build random ids/masks/labels plus a config (``prepare_config_and_inputs``).
        lowerCamelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : List[Any] = None
        if self.use_input_lengths:
            lowerCamelCase_ : Optional[int] = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        lowerCamelCase_ : List[str] = None
        if self.use_token_type_ids:
            lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        lowerCamelCase_ : int = None
        lowerCamelCase_ : int = None
        lowerCamelCase_ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : List[str] = ids_tensor([self.batch_size] , 2 ).float()
            lowerCamelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : Tuple = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def _UpperCamelCase ( self ):
        # ``get_config``: a FlaubertConfig mirroring the hyper-parameters above.
        return FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Base model: last_hidden_state must be (batch, seq, hidden).
        lowerCamelCase_ : Any = FlaubertModel(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(a_ , lengths=a_ , langs=a_ )
        lowerCamelCase_ : List[str] = model(a_ , langs=a_ )
        lowerCamelCase_ : List[str] = model(a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # LM head: scalar loss plus (batch, seq, vocab) logits.
        lowerCamelCase_ : Optional[Any] = FlaubertWithLMHeadModel(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : str = model(a_ , token_type_ids=a_ , labels=a_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Simple QA head: per-token start/end logits.
        lowerCamelCase_ : Tuple = FlaubertForQuestionAnsweringSimple(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Dict = model(a_ )
        lowerCamelCase_ : List[Any] = model(a_ , start_positions=a_ , end_positions=a_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Beam-search QA head: top-k start/end probabilities and a cls logit.
        lowerCamelCase_ : List[str] = FlaubertForQuestionAnswering(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Dict = model(a_ )
        lowerCamelCase_ : Any = model(
            a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , )
        lowerCamelCase_ : Any = model(
            a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , )
        (lowerCamelCase_) : Union[str, Any] = result_with_labels.to_tuple()
        lowerCamelCase_ : int = model(a_ , start_positions=a_ , end_positions=a_ )
        (lowerCamelCase_) : Dict = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Sequence classification head: scalar loss, (batch, num_labels) logits.
        lowerCamelCase_ : Optional[int] = FlaubertForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(a_ )
        lowerCamelCase_ : Tuple = model(a_ , labels=a_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Token classification head: per-token label logits.
        lowerCamelCase_ : int = self.num_labels
        lowerCamelCase_ : List[Any] = FlaubertForTokenClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        # Multiple choice head: inputs are expanded to (batch, num_choices, seq).
        lowerCamelCase_ : Union[str, Any] = self.num_choices
        lowerCamelCase_ : int = FlaubertForMultipleChoice(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Dict = model(
            a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _UpperCamelCase ( self ):
        # ``prepare_config_and_inputs_for_common``: repackage inputs as a kwargs dict.
        lowerCamelCase_ : Optional[int] = self.prepare_config_and_inputs()
        (
            lowerCamelCase_
        ) : Dict = config_and_inputs
        lowerCamelCase_ : str = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Flaubert model test suite: wires FlaubertModelTester/ConfigTester into the
    common ModelTesterMixin + PipelineTesterMixin machinery.

    NOTE(review): as elsewhere in this file, method parameters are all named
    ``a_`` while bodies read descriptive names (``pipeline_test_casse_name``,
    ``model_class`` …) that are never bound, and all test methods share the name
    ``_UpperCamelCase`` (later defs shadow earlier ones) — mechanical rename;
    confirm against the original test file.
    """
    __UpperCAmelCase : Optional[int] = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase : Dict = (
        {
            '''feature-extraction''': FlaubertModel,
            '''fill-mask''': FlaubertWithLMHeadModel,
            '''question-answering''': FlaubertForQuestionAnsweringSimple,
            '''text-classification''': FlaubertForSequenceClassification,
            '''token-classification''': FlaubertForTokenClassification,
            '''zero-shot''': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ ):
        # Skip QA pipeline tests with slow tokenizers (known failures).
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _UpperCamelCase ( self , a_ , a_ , a_=False ):
        # Add dummy cls_index/is_impossible labels for the beam-search QA head.
        lowerCamelCase_ : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                lowerCamelCase_ : List[str] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=a_ )
                lowerCamelCase_ : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=a_ )
        return inputs_dict
    def _UpperCamelCase ( self ):
        # setUp: instantiate the model tester and config tester.
        lowerCamelCase_ : int = FlaubertModelTester(self )
        lowerCamelCase_ : Optional[int] = ConfigTester(self , config_class=a_ , emb_dim=37 )
    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*a_ )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*a_ )
    @slow
    def _UpperCamelCase ( self ):
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : List[Any] = FlaubertModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
    @slow
    @require_torch_gpu
    def _UpperCamelCase ( self ):
        # Round-trip each model through torch.jit.trace/save/load.
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            lowerCamelCase_ : List[str] = True
            lowerCamelCase_ : Dict = model_class(config=a_ )
            lowerCamelCase_ : Tuple = self._prepare_for_class(a_ , a_ )
            lowerCamelCase_ : List[Any] = torch.jit.trace(
                a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) )
                lowerCamelCase_ : Dict = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ )
                loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Integration test: run a fixed sentence through ``flaubert_base_cased`` and
    compare a 3x3 slice of the hidden states against reference values."""
    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        lowerCamelCase_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            lowerCamelCase_ : List[Any] = model(a_ )[0]
        # Base model hidden size is 768; the fixed input has 11 tokens.
        lowerCamelCase_ : List[str] = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , a_ )
        lowerCamelCase_ : Union[str, Any] = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
        # Loose tolerance: values were recorded from a reference run.
        self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) )
| 717 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    """BuilderConfig for the Spark-backed dataset builder.

    ``features`` optionally pins the dataset schema; it defaults to ``None``.
    """
    # Optional explicit Features schema for the produced dataset.
    __UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , ):
    '''Return a zero-argument generator factory that yields
    ``("<partition>_<row>", row_dict)`` pairs from a Spark DataFrame, visiting
    partitions in the given order (one ``collect()`` per partition).

    NOTE(review): both parameters are named ``lowerCAmelCase_`` (duplicate
    argument names are a SyntaxError) while the closure reads ``df`` and
    ``partition_order`` — a mechanical rename; restore the original parameter
    names before use.
    '''
    import pyspark
    def generate_fn():
        # Tag every row with its partition id so rows can be pulled per-partition.
        lowerCamelCase_ : Dict = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            lowerCamelCase_ : Dict = df_with_partition_id.select("*").where(F"""part_id = {partition_id}""").drop("part_id")
            lowerCamelCase_ : Dict = partition_df.collect()
            lowerCamelCase_ : Dict = 0
            for row in rows:
                # Example keys are unique per (partition, row index within partition).
                yield F"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1
    return generate_fn
class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable backed by a Spark DataFrame; shuffling and worker
    sharding are implemented by permuting / splitting the partition order.

    NOTE(review): the three non-dunder methods all share the name
    ``_UpperCamelCase`` (later defs shadow earlier ones) and method bodies read
    names (``generator``) that are never bound — mechanical rename; confirm
    against the original source.
    """
    def __init__( self , a_ , a_=None , ):
        # Default partition order is the DataFrame's natural partition sequence.
        lowerCamelCase_ : Dict = df
        lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )
    def __iter__( self ):
        # Delegate to the generator factory built in __init__.
        yield from self.generate_examples_fn()
    def _UpperCamelCase ( self , a_ ):
        # Shuffle: permute the partition order in place with the provided RNG
        # (presumably a np.random.Generator; confirm) and return a new iterable.
        lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )
    def _UpperCamelCase ( self , a_ , a_ ):
        # Shard: keep only this worker's share of the partitions.
        lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )
    @property
    def _UpperCamelCase ( self ):
        # Number of shards equals the number of partitions in the current order.
        return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
import pyspark
lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase_ : Optional[Any] = df
lowerCamelCase_ : List[Any] = working_dir
super().__init__(
cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a_ )
lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase_ : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _UpperCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self , a_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
.mapInArrow(a_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
| 73 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
# Process-wide filter: the wrapper class below intentionally controls when the
# scheduler is stepped relative to the optimizer, so torch's
# "lr_scheduler.step() before optimizer.step()" UserWarning is noise here.
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class lowerCAmelCase__ :
    """Wraps a learning-rate scheduler so it only advances when the wrapped
    optimizer(s) actually stepped (i.e. not while gradients are still being
    accumulated or when a step was skipped by grad scaling).

    NOTE(review): the ``__init__`` parameters are all named ``a_`` while the
    body reads ``scheduler``/``optimizers``/``split_batches``/
    ``step_with_optimizer`` — a mechanical rename; confirm the original
    keyword names before use.
    """
    def __init__( self , a_ , a_ , a_ = True , a_ = False ):
        lowerCamelCase_ : Optional[Any] = scheduler
        # Normalize to a list so the skip check below can iterate uniformly.
        lowerCamelCase_ : str = optimizers if isinstance(a_ , (list, tuple) ) else [optimizers]
        lowerCamelCase_ : Optional[Any] = split_batches
        lowerCamelCase_ : Dict = step_with_optimizer
        lowerCamelCase_ : Dict = GradientState()
    def _UpperCamelCase ( self , *a_ , **a_ ):
        """Step the scheduler, respecting gradient accumulation and skipped optimizer steps."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*a_ , **a_ )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the internal step count in sync without changing the LR.
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                # e.g. AMP grad scaler skipped this step -> don't advance the LR.
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*a_ , **a_ )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            lowerCamelCase_ : List[Any] = AcceleratorState().num_processes
            for _ in range(a_ ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , "total_steps" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*a_ , **a_ )
                else:
                    self.scheduler.step(*a_ , **a_ )
    def _UpperCamelCase ( self ):
        # Delegate: last computed learning rate(s).
        return self.scheduler.get_last_lr()
    def _UpperCamelCase ( self ):
        # Delegate: scheduler state for checkpointing.
        return self.scheduler.state_dict()
    def _UpperCamelCase ( self , a_ ):
        # Delegate: restore scheduler state from a checkpoint.
        self.scheduler.load_state_dict(a_ )
    def _UpperCamelCase ( self ):
        # Delegate: current learning rate(s).
        return self.scheduler.get_lr()
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Delegate: display the current learning rate.
        return self.scheduler.print_lr(*a_ , **a_ )
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
lowerCamelCase_ : List[str] = cst_fwd.get(lowerCAmelCase_ , np.inf)
lowerCamelCase_ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt))
lowerCamelCase_ : Optional[int] = new_cost_f
lowerCamelCase_ : List[str] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
lowerCamelCase_ : Tuple = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = -1
lowerCamelCase_ : Tuple = set()
lowerCamelCase_ : Dict = set()
lowerCamelCase_ : int = {source: 0}
lowerCamelCase_ : str = {destination: 0}
lowerCamelCase_ : Tuple = {source: None}
lowerCamelCase_ : Dict = {destination: None}
lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue()
lowerCamelCase_ : PriorityQueue[Any] = PriorityQueue()
lowerCamelCase_ : List[str] = np.inf
queue_forward.put((0, source))
queue_backward.put((0, destination))
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
lowerCamelCase_ ,lowerCamelCase_ : List[Any] = queue_forward.get()
visited_forward.add(lowerCAmelCase_)
lowerCamelCase_ ,lowerCamelCase_ : str = queue_backward.get()
visited_backward.add(lowerCAmelCase_)
lowerCamelCase_ : Any = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
lowerCamelCase_ : Dict = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
lowerCamelCase_ : Union[str, Any] = shortest_distance
return shortest_path_distance
# Forward adjacency list: node -> [[neighbor, edge_weight], ...].
# NOTE(review): both this dict and the backward one below are bound to
# ``__magic_name__`` — the same name as the two functions above — so each
# assignment clobbers the previous binding; confirm intended names upstream.
__magic_name__ = {
    '''B''': [['''C''', 1]],
    '''C''': [['''D''', 1]],
    '''D''': [['''F''', 1]],
    '''E''': [['''B''', 1], ['''G''', 2]],
    '''F''': [],
    '''G''': [['''F''', 1]],
}
# Backward (reversed-edge) adjacency list for the bidirectional search.
__magic_name__ = {
    '''B''': [['''E''', 1]],
    '''C''': [['''B''', 1]],
    '''D''': [['''C''', 1]],
    '''F''': [['''D''', 1], ['''G''', 1]],
    '''E''': [[None, np.inf]],
    '''G''': [['''E''', 2]],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE fine-tuning task, used to size the
# classification head when converting a fine-tuned checkpoint.
# NOTE(review): the converter below looks this table up as
# ``GLUE_TASKS_NUM_LABELS``, but here it is bound to ``__magic_name__`` —
# confirm the intended name upstream.
__magic_name__ = {
    '''cola''': 2,
    '''mnli''': 3,
    '''mrpc''': 2,
    '''sst-2''': 2,
    '''sts-b''': 1,
    '''qqp''': 2,
    '''qnli''': 2,
    '''rte''': 2,
    '''wnli''': 2,
}
logging.set_verbosity_info()
def __magic_name__ (tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None):
    """Convert a (possibly fine-tuned) TensorFlow XLNet checkpoint to PyTorch.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pretrained XLNet architecture.
        pytorch_dump_folder_path: folder receiving the weights and config files.
        finetuning_task: optional GLUE/SQuAD task name used to pick the task head.

    Fixes applied: the original declared four parameters all named
    ``lowerCAmelCase_`` (a SyntaxError), read never-bound names (``config``,
    ``model``) and collapsed the ``os.path.join`` arguments for the dump paths.
    NOTE(review): this module binds the GLUE label table to ``__magic_name__``
    above; ``GLUE_TASKS_NUM_LABELS`` must exist at call time — confirm.
    """
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME)
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict() , pytorch_weights_dump_path)
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path , "w" , encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI wrapper around the converter above.
    # NOTE(review): ``convert_xlnet_checkpoint_to_pytorch`` is not defined in
    # this module as shown — the converter above is bound to ``__magic_name__``;
    # confirm the intended name upstream.
    __magic_name__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    __magic_name__ = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration for the Salesforce CTRL model.

    NOTE(review): the three class attributes below all share the obfuscated
    name ``__UpperCAmelCase`` (originally ``model_type``,
    ``keys_to_ignore_at_inference`` and ``attribute_map``), so only the last
    binding survives; they are kept as-is to preserve the existing interface.

    Fixes applied: the original ``__init__`` declared every parameter as
    ``a_`` (duplicate argument names are a SyntaxError) and the base class
    name ``__lowerCamelCase`` was undefined — ``PretrainedConfig`` is the
    class imported at the top of this file.
    """

    __UpperCAmelCase : Dict = '''ctrl'''
    __UpperCAmelCase : Dict = ['''past_key_values''']
    __UpperCAmelCase : int = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , vocab_size=24_6534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        # Model shape / vocabulary.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        # Inner (feed-forward) dimension.
        self.dff = dff
        # Dropout probabilities.
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
| 73 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Tuple = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(lowerCAmelCase_):
for j in range(lowerCAmelCase_):
lowerCamelCase_ : List[Any] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
    # read original image
    # NOTE(review): ``convert_to_negative`` and ``img`` are not defined in this
    # module as shown — the converter above is bound to ``__magic_name__`` and
    # the image is assigned to ``__magic_name__`` as well; confirm upstream.
    __magic_name__ = imread('''image_data/lena.jpg''', 1)
    # convert to its negative
    __magic_name__ = convert_to_negative(img)
    # show result image
    imshow('''negative of original image''', img)
    waitKey(0)
    destroyAllWindows()
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune, or train from scratch.

    NOTE(review): every field below shares the obfuscated name
    ``__UpperCAmelCase`` (so only the last binding survives) and the defaults
    reference the undefined name ``__lowerCamelCase``; the validator method at
    the bottom reads ``self.config_overrides`` / ``self.config_name`` /
    ``self.model_name_or_path``, which are never bound here — confirm against
    the original ``run_mlm_wwm.py`` dataclass.
    """
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        }, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        }, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    __UpperCAmelCase : str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
    def _UpperCamelCase ( self ):
        # Post-init validation: --config_overrides is mutually exclusive with
        # --config_name / --model_name_or_path.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation (dataset names, files, masking ratio, padding).

    NOTE(review): every field below shares the obfuscated name
    ``__UpperCAmelCase`` (so only the last binding survives) and several
    defaults reference the undefined name ``__lowerCamelCase``; the validator
    at the bottom reads ``self.train_file`` / ``self.validation_file``, which
    are never bound here — confirm against the original ``run_mlm_wwm.py``.
    """
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    __UpperCAmelCase : Optional[int] = field(
        default=5, metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        }, )
    __UpperCAmelCase : Optional[int] = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        }, )
    __UpperCAmelCase : Optional[int] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
    __UpperCAmelCase : float = field(
        default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        }, )
    def _UpperCamelCase ( self ):
        # Post-init validation: train/validation files must be csv, json or txt.
        if self.train_file is not None:
            lowerCamelCase_ : str = self.train_file.split("." )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            lowerCamelCase_ : Union[str, Any] = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __magic_name__ (dataset , ref_file):
    """Attach whole-word-masking reference data to ``dataset``.

    Reads one JSON object per non-empty line from ``ref_file`` and stores the
    list under the ``chinese_ref`` column of a rebuilt ``Dataset``.

    Fixes applied: the original declared both parameters as ``lowerCAmelCase_``
    (a SyntaxError), read the never-bound name ``refs`` and compared a length
    against itself in the sanity assert.
    """
    with open(ref_file , "r" , encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    # One reference entry is expected per dataset example.
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def __magic_name__ ( ):
    """Train/evaluate a masked language model with whole-word masking
    (the ``run_mlm_wwm`` example script).

    NOTE(review): in this obfuscated copy every local assignment targets the
    recycled name ``lowerCamelCase_`` while later lines read descriptive names
    (``parser``, ``training_args``, ``datasets``, ``config``, ``tokenizer``,
    ``model``, ``trainer``, ...) that are never bound — the function cannot run
    as written; confirm against the original ``run_mlm_wwm.py`` script.
    """
    # Parse model/data/training arguments, either from a single JSON file or
    # from the command line.
    lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    lowerCamelCase_ : List[str] = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
    # Log on each process the small summary:
    # NOTE(review): ``training_args.fpaa`` looks like an obfuscation of ``fp16``.
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # Carve a validation split out of the train split when none exists.
            lowerCamelCase_ : Any = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
            lowerCamelCase_ : Optional[int] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
    else:
        lowerCamelCase_ : Dict = {}
        if data_args.train_file is not None:
            lowerCamelCase_ : str = data_args.train_file
        if data_args.validation_file is not None:
            lowerCamelCase_ : Any = data_args.validation_file
        lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
        if extension == "txt":
            lowerCamelCase_ : List[str] = "text"
        lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase_ : Optional[Any] = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
    elif model_args.model_name_or_path:
        lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
    else:
        lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""")
            config.update_from_string(model_args.config_overrides)
            logger.info(F"""New config: {config}""")
    lowerCamelCase_ : List[str] = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
    elif model_args.model_name_or_path:
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")
    if model_args.model_name_or_path:
        lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch")
        lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
    model.resize_token_embeddings(len(lowerCAmelCase_))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
    else:
        lowerCamelCase_ : Dict = datasets["validation"].column_names
    lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
    lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
    def tokenize_function(lowerCAmelCase_):
        # Remove empty lines
        # NOTE(review): this closure reads ``examples`` but its parameter is
        # named ``lowerCAmelCase_`` — confirm against the original script.
        lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
        return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
    lowerCamelCase_ : str = datasets.map(
        lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        lowerCamelCase_ : List[str] = add_chinese_references(
            tokenized_datasets["validation"] , data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        lowerCamelCase_ : Union[str, Any] = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    lowerCamelCase_ : int = Trainer(
        model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            lowerCamelCase_ : Dict = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            lowerCamelCase_ : Dict = model_args.model_name_or_path
        else:
            lowerCamelCase_ : int = None
        lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
        trainer.save_model() # Saves the tokenizer too for easy upload
        lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(F""" {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
    # Evaluation
    lowerCamelCase_ : Dict = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        lowerCamelCase_ : Tuple = trainer.evaluate()
        lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
        lowerCamelCase_ : Tuple = perplexity
        lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(F""" {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")
    return results
def __magic_name__ (lowerCAmelCase_):
    """Entry point for TPU ``xla_spawn`` launchers; the process-index argument is ignored."""
    main()
if __name__ == "__main__":
    # Standard command-line entry point.
    main()
| 73 | 0 |
from __future__ import annotations
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0")
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return principal * daily_interest_rate * days_between_payments
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0")
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0")
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return compound_interest(
lowerCAmelCase_ , nominal_annual_percentage_rate / 365 , number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
    """Immutable state carried by the Flax Karras-VE scheduler (Flax keeps
    scheduler state outside the scheduler object itself).

    NOTE(review): the three fields below all share the obfuscated name
    ``__UpperCAmelCase`` (originally ``num_inference_steps``, ``schedule`` and
    ``timesteps``), so only the last binding survives — confirm upstream.
    """
    # setable values
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
    @classmethod
    def _UpperCamelCase ( cls ):
        # Factory returning a fresh, empty state.
        return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Output of a Karras-VE scheduler step: previous sample, derivative and
    updated scheduler state.

    NOTE(review): the base class name ``__lowerCamelCase`` is undefined here —
    presumably ``BaseOutput`` from the import above — and the three fields all
    share the obfuscated name ``__UpperCAmelCase``; confirm upstream.
    """
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( FlaxSchedulerMixin, ConfigMixin ):
    """Flax scheduler implementing the stochastic sampling procedure from
    Karras et al. (2022), "Elucidating the Design Space of Diffusion-Based
    Generative Models".

    NOTE(review): all methods share the obfuscated name ``_UpperCamelCase``,
    so later definitions shadow earlier ones; names are kept to preserve the
    existing interface. Fixes applied: every method originally declared
    duplicate ``a_`` parameters (a SyntaxError), ``jnp.floataa`` was a typo for
    ``jnp.float32``, and the undefined bases ``__lowerCamelCase`` are the two
    mixins imported at the top of this file.
    """

    @property
    def _UpperCamelCase ( self ):
        # Flax schedulers carry their mutable state explicitly.
        return True

    @register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 100 , s_noise = 1.0_07 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ):
        # All hyper-parameters are stored on ``self.config`` by
        # @register_to_config; nothing else to initialize.
        pass

    def _UpperCamelCase ( self ):
        """Return a fresh, empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def _UpperCamelCase ( self , state , num_inference_steps , shape = () ):
        """Build the (reversed) timestep and sigma schedule for sampling."""
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )

    def _UpperCamelCase ( self , key , state , sample , sigma , ):
        """Explode the sample to a higher noise level sigma_hat (stochastic churn)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _UpperCamelCase ( self , state , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        """First-order (Euler) prediction step."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )

    def _UpperCamelCase ( self , state , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        """Second-order (Heun) correction step."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )

    def _UpperCamelCase ( self , state , original_samples , noise , timesteps ):
        # Adding training noise is not supported by this scheduler.
        raise NotImplementedError()
| 73 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case ):
"""simple docstring"""
if isinstance(_snake_case , np.ndarray ):
return list(tensor.shape )
UpperCAmelCase = tf.shape(_snake_case )
if tensor.shape == tf.TensorShape(_snake_case ):
return dynamic
UpperCAmelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_snake_case )]
def _a ( logits , axis = None , name = None ):
    """Softmax with a tiny additive constant on the logits (works around an
    XLA/CPU issue with masked, all-``-inf`` rows).

    Fixes applied: the original declared three parameters all named
    ``_snake_case`` (duplicate argument names are a SyntaxError).
    """
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def _a ( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
    """Functional layer normalization of ``inputs`` over ``axis`` with 1-D
    ``weight`` (scale) and ``bias`` (offset).

    Fixes applied: the original declared duplicate ``_snake_case`` parameters
    (a SyntaxError) and its obfuscated body lost the ``shape[axis] = ...``
    subscript assignment used to broadcast weight/bias on non-last axes.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(epsilon , float ):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def _a ( input , start_dim=0 , end_dim=-1 ):
    """TF analogue of ``torch.flatten``: collapse dimensions
    ``[start_dim, end_dim]`` of ``input`` into a single dimension.

    Fixes applied: the original declared duplicate ``_snake_case`` parameters
    (a SyntaxError) and read the never-bound names ``input``/``start_dim``/
    ``end_dim``.
    """
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def _a ( encoder_attention_mask ):
    """Turn a 2-D or 3-D encoder attention mask into an additive 4-D mask
    (0 where attending is allowed, ``dtype.min`` where it is masked).

    Fixes applied: the original read the never-bound name
    ``encoder_attention_mask`` (its parameter was ``_snake_case``).
    """
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def _a(tensor, embed_dim, tensor_name="input_ids"):
    """Assert that every id in `tensor` is a valid index into an embedding of size `embed_dim`.

    Raises a TF debugging assertion with a tokenization-oriented message when
    any value is >= `embed_dim`.

    NOTE(review): parameter names restored from their uses in the obfuscated
    original (duplicate ``_snake_case`` parameters were a SyntaxError).
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase = [x for x in data if len(_snake_case ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
UpperCAmelCase = np.asarray(_snake_case )
UpperCAmelCase = 1
UpperCAmelCase = np.array_split(_snake_case , _snake_case )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase = np.array_split(_snake_case , _snake_case )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_snake_case ):
UpperCAmelCase = chunk_data
else:
UpperCAmelCase = data
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
if name in group.attrs:
UpperCAmelCase = [n.decode("""utf8""" ) if hasattr(_snake_case , """decode""" ) else n for n in group.attrs[name]]
else:
UpperCAmelCase = []
UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(_snake_case , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def _a(data):
    """Append a trailing axis to every rank-1 tf.Tensor in a nested structure.

    Non-tensor leaves and tensors of other ranks are returned unchanged; the
    structure (dict/list/tuple nesting) is preserved via ``tf.nest``.

    NOTE(review): the inner function's parameter was mangled to ``_snake_case``
    while the body used ``t``; restored from those uses.
    """

    def _expand_single_ad_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor, data)
| 74 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__ ( unittest.TestCase ):
    """Unit tests for the helpers imported from `transformers.utils.backbone_utils`.

    NOTE(review): the obfuscated original named all three methods
    ``_UpperCamelCase`` (so only the last survived, and none were discovered
    by unittest) and replaced every argument with the undefined name ``A``.
    Method names and arguments are restored from the assertions and the
    comments that were left intact.
    """

    def test_get_aligned_output_features_output_indices(self):
        """Alignment of out_features/out_indices for each None/explicit combination."""
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        """Every invalid combination must raise; the final valid call must not."""
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        """Setting out_features/out_indices on a BackboneMixin keeps both in sync."""
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 74 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
_UpperCamelCase = logging.getLogger()
def get_results(output_dir):
    """Load the `all_results.json` metrics file written into `output_dir`.

    Returns the parsed dict, or raises ValueError when the file is missing.

    NOTE(review): the obfuscated original named this ``_a`` while the test
    class below calls it as ``get_results``; the name and the locals
    (`path`, `results`) are restored from those uses.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
_UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCamelCase__ ( TestCasePlus ):
    """End-to-end TPU tests that launch example scripts through `xla_spawn`.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``snake_case`` (restored to the imported ``TestCasePlus``, whose
    ``get_auto_remove_tmp_dir`` is used below) and named both methods
    ``_UpperCamelCase`` so the first was shadowed and neither was discovered
    by unittest.
    """

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        # argv layout expected by xla_spawn: training_script, xla_spawn flags,
        # then the script path again followed by its own arguments.
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 74 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
    """Circular queue of fixed capacity backed by a doubly linked ring of nodes.

    The ring is pre-allocated at construction; enqueue/dequeue move the
    front/rear pointers around it and store/clear node payloads, so no nodes
    are created after initialisation. An empty slot is marked by ``data is None``.

    NOTE(review): the obfuscated original named every method ``_UpperCamelCase``
    although the bodies call ``self.create_linked_list`` / ``self.is_empty`` /
    ``self.check_can_perform_operation`` / ``self.check_is_full`` by name, and
    it referenced a ``Node`` class that had also lost its name; the original
    identifiers are restored from those call sites.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Pre-allocate `initial_capacity` nodes wired into a circular doubly linked list."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        # Empty iff the pointers coincide and the shared slot holds no payload.
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Return the front payload without removing it; raises on an empty queue."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store `data` in the next free slot; raises Exception("Full Queue") when full."""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            # Advance rear only when the current slot is already occupied.
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the front payload; raises Exception("Empty Queue") when empty."""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it in place without moving pointers.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    """Ring node: payload plus next/prev links, all initialised empty."""

    def __init__(self) -> None:
        self.data = None
        self.next = None
        self.prev = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 74 | 1 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
    """Config/input factory for the Flax BigBird tests below.

    NOTE(review): an obfuscation pass destroyed this class — every
    constructor parameter is literally named ``A`` (duplicate parameter
    names are a SyntaxError) and every assignment target became
    ``UpperCAmelCase`` while the reads below use the original attribute
    names (batch_size, seq_length, vocab_size, ...). It also defines two
    methods with the same name, so the first is shadowed. The original
    parameter/attribute names must be restored before this can run.
    """

    def __init__( self ,A ,A=2 ,A=56 ,A=True ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=2 ,A=2 ,A=7 ,A="gelu_new" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=4 ,A="block_sparse" ,A=True ,A=False ,A=2 ,A=3 ,):
        # Store the per-test hyper-parameters (shapes, model dims, sparse
        # attention settings). The right-hand names indicate the intended
        # attribute for each mangled assignment.
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_attention_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_choices
        UpperCAmelCase = rescale_embeddings
        UpperCAmelCase = attention_type
        UpperCAmelCase = use_bias
        UpperCAmelCase = block_size
        UpperCAmelCase = num_random_blocks

    def _UpperCamelCase ( self ):
        """Build random input ids / masks and a matching BigBirdConfig."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = None
        if self.use_attention_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        UpperCAmelCase = BigBirdConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,block_size=self.block_size ,num_random_blocks=self.num_random_blocks ,use_bias=self.use_bias ,rescale_embeddings=self.rescale_embeddings ,)
        return config, input_ids, token_type_ids, attention_mask

    def _UpperCamelCase ( self ):
        """Repackage prepare_config_and_inputs() output as a model kwargs dict."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
    """Flax BigBird model test-suite wiring (slow overrides, JIT round-trip check).

    NOTE(review): obfuscation collapsed the three class attributes (all
    ``SCREAMING_SNAKE_CASE`` — only the last assignment survives) and named
    most methods ``_UpperCamelCase`` (duplicates shadow each other, and
    unittest discovers none of them). The base ``snake_case`` is undefined in
    this module; it presumably stood for the shared FlaxModelTesterMixin —
    confirm against the original file.
    """

    SCREAMING_SNAKE_CASE = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False

    def _UpperCamelCase ( self ):
        """Create the shared model tester fixture."""
        UpperCAmelCase = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ):
        super().test_hidden_states_output()

    @slow
    def _UpperCamelCase ( self ):
        """Every BigBird head must load from the public pretrained checkpoint."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
            self.assertIsNotNone(A )

    def _UpperCamelCase ( self ):
        # Attention-probability outputs are only checked when the suite
        # enables them (block-sparse attention returns None probs).
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ):
        """JIT-compiled and eager forward passes must agree in shape."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase = self._prepare_for_class(A ,A )
                UpperCAmelCase = model_class(A )

                @jax.jit
                def model_jitted(A ,A=None ,**A ):
                    # NOTE(review): duplicate `A` parameters are a SyntaxError;
                    # original names (input_ids, attention_mask) were lost.
                    return model(input_ids=A ,attention_mask=A ,**A )

                with self.subTest("""JIT Enabled""" ):
                    UpperCAmelCase = model_jitted(**A ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCAmelCase = model_jitted(**A ).to_tuple()

                self.assertEqual(len(A ) ,len(A ) )
                for jitted_output, output in zip(A ,A ):
                    self.assertEqual(jitted_output.shape ,output.shape )

    def _UpperCamelCase ( self ,A ,A ,A ,A=1e-5 ,A="outputs" ,A=None ):
        """Skip attention-prob comparison against PyTorch; defer the rest to the base class."""
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(A ,A ,A ,A ,A ,A )
| 74 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    """Copy generator weights from an original HiFi-GAN checkpoint into `hf_model`.

    Weight norm is applied so the (weight_g, weight_v) pairs exist as
    parameters, the tensors are copied by key, and weight norm is removed
    again so the model holds plain fused weights.

    NOTE(review): the obfuscated original named this ``_a`` and dropped every
    assignment target; the function name is restored from its call site in
    the conversion entry point below, and the target attributes
    (conv_pre / upsampler / resblocks / conv_post) from the upstream
    SpeechT5 HiFi-GAN conversion script — confirm against that script.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original SpeechT5 HiFi-GAN checkpoint to the HF format.

    Loads (or builds a default) SpeechTaHifiGanConfig, copies the generator
    weights, attaches the feature mean/scale statistics from `stats_path`,
    saves the model to `pytorch_dump_folder_path`, and optionally pushes to
    the hub when `repo_id` is given.

    NOTE(review): the obfuscated original named this ``_a`` with duplicated
    ``_snake_case`` parameters (a SyntaxError); the name is restored from the
    __main__ block below, which calls ``convert_hifigan_checkpoint``.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds [mean, scale] for the input log-mel features.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: collect the conversion arguments and run it.
    # NOTE(review): obfuscation renamed the assignment targets to
    # ``_UpperCamelCase`` while the uses below expect ``parser`` and ``args``,
    # and ``convert_hifigan_checkpoint`` is not defined under that name above
    # (the function was renamed to ``_a``); both need restoring to run.
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    _UpperCamelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 74 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowerCamelCase__ ( snake_case ):
    """QA module extended with a 5-way pooled classification head.

    NOTE(review): ``snake_case`` is undefined here — the base-class name was
    lost to obfuscation (the import above suggests
    FlaxBigBirdForQuestionAnsweringModule). The three class attributes were
    all renamed to ``SCREAMING_SNAKE_CASE`` (only the last survives) and
    ``jnp.floataa`` looks like a mangled ``jnp.float32`` — confirm against
    the original script.
    """

    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = jnp.floataa
    SCREAMING_SNAKE_CASE = True

    def _UpperCamelCase ( self ):
        """Set up the base QA module, then add the classification Dense layer."""
        super().setup()
        # NOTE(review): the target should be ``self.cls`` (read in __call__);
        # the assignment name was mangled.
        UpperCAmelCase = nn.Dense(5 ,dtype=self.dtype )

    def __call__( self ,*A ,**A ):
        # Run the wrapped QA module and append pooled classification logits
        # computed from the third output (the pooled representation).
        UpperCAmelCase = super().__call__(*A ,**A )
        UpperCAmelCase = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class lowerCamelCase__ ( snake_case ):
    """Model wrapper binding the natural-questions module defined above.

    NOTE(review): both ``snake_case`` (presumably
    FlaxBigBirdForQuestionAnswering) and
    ``FlaxBigBirdForNaturalQuestionsModule`` (the preceding class, whose name
    was obfuscated away) are undefined in this module as written.
    """

    SCREAMING_SNAKE_CASE = FlaxBigBirdForNaturalQuestionsModule
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
def cross_entropy(_snake_case , _snake_case , _snake_case=None ):
UpperCAmelCase = logits.shape[-1]
UpperCAmelCase = (labels[..., None] == jnp.arange(_snake_case )[None]).astype("""f4""" )
UpperCAmelCase = jax.nn.log_softmax(_snake_case , axis=-1 )
UpperCAmelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
UpperCAmelCase = reduction(_snake_case )
return loss
UpperCAmelCase = partial(_snake_case , reduction=jnp.mean )
UpperCAmelCase = cross_entropy(_snake_case , _snake_case )
UpperCAmelCase = cross_entropy(_snake_case , _snake_case )
UpperCAmelCase = cross_entropy(_snake_case , _snake_case )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowerCamelCase__ :
    """Hyper-parameter bundle for the BigBird natural-questions fine-tuning run.

    NOTE(review): obfuscation collapsed every field name to
    ``SCREAMING_SNAKE_CASE`` (as a dataclass this keeps only one field), and
    the __post_init__ method lost its name too. The values suggest, in
    order: model_id, logging_steps, save_steps, block_size, num_random_blocks,
    batch_size_per_device, max_epochs, lr, init_lr, warmup_steps,
    weight_decay, save_dir, base_dir, tr_data_path, val_data_path — confirm
    against the original script before restoring.
    """

    SCREAMING_SNAKE_CASE = "google/bigbird-roberta-base"
    SCREAMING_SNAKE_CASE = 3_000
    SCREAMING_SNAKE_CASE = 10_500
    SCREAMING_SNAKE_CASE = 128
    SCREAMING_SNAKE_CASE = 3
    SCREAMING_SNAKE_CASE = 1
    SCREAMING_SNAKE_CASE = 5

    # tx_args
    SCREAMING_SNAKE_CASE = 3e-5
    SCREAMING_SNAKE_CASE = 0.0
    SCREAMING_SNAKE_CASE = 20_000
    SCREAMING_SNAKE_CASE = 0.00_95

    SCREAMING_SNAKE_CASE = "bigbird-roberta-natural-questions"
    SCREAMING_SNAKE_CASE = "training-expt"
    SCREAMING_SNAKE_CASE = "data/nq-training.jsonl"
    SCREAMING_SNAKE_CASE = "data/nq-validation.jsonl"

    def _UpperCamelCase ( self ):
        """Create the experiment directory and derive run-wide values.

        NOTE(review): reads ``self.base_dir`` / ``self.save_dir`` /
        ``self.batch_size_per_device`` — names the mangled fields above no
        longer provide; the assignment targets (save_dir path, batch_size)
        were also lost.
        """
        os.makedirs(self.base_dir ,exist_ok=A )
        UpperCAmelCase = os.path.join(self.base_dir ,self.save_dir )
        UpperCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCamelCase__ :
    """Collator turning tokenized NQ features into fixed-length jnp batches.

    NOTE(review): the two fields (pad token id and max length, per the uses
    ``self.pad_id`` / ``self.max_length`` below) were both renamed to
    ``SCREAMING_SNAKE_CASE``; the helper methods also lost their original
    names (collate_fn, fetch_inputs, _fetch_inputs per the call sites).
    """

    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 4_096  # no dynamic padding on TPUs

    def __call__( self ,A ):
        # Collate raw features, then shard the batch across local devices.
        UpperCAmelCase = self.collate_fn(A )
        UpperCAmelCase = jax.tree_util.tree_map(A ,A )
        return batch

    def _UpperCamelCase ( self ,A ):
        """Pad input ids, build attention masks, and box labels as int32 arrays."""
        UpperCAmelCase , UpperCAmelCase = self.fetch_inputs(features["""input_ids"""] )
        UpperCAmelCase = {
            """input_ids""": jnp.array(A ,dtype=jnp.intaa ),
            """attention_mask""": jnp.array(A ,dtype=jnp.intaa ),
            """start_labels""": jnp.array(features["""start_token"""] ,dtype=jnp.intaa ),
            """end_labels""": jnp.array(features["""end_token"""] ,dtype=jnp.intaa ),
            """pooled_labels""": jnp.array(features["""category"""] ,dtype=jnp.intaa ),
        }
        return batch

    def _UpperCamelCase ( self ,A ):
        """Pad every sequence in the batch; returns (input_ids, attention_masks)."""
        UpperCAmelCase = [self._fetch_inputs(A ) for ids in input_ids]
        return zip(*A )

    def _UpperCamelCase ( self ,A ):
        """Pad one id sequence to max_length with pad_id; mask is 1 for real tokens."""
        UpperCAmelCase = [1 for _ in range(len(A ) )]
        while len(A ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield consecutive `batch_size` slices of `dataset` as dicts.

    When `seed` is given the dataset is shuffled first (requires a
    `.shuffle(seed=...)` method, e.g. a `datasets.Dataset`). A trailing
    partial batch is dropped.

    NOTE(review): the obfuscated original named this ``_a``; the name is
    restored from the Trainer's call sites (`get_batched_dataset(...)`).
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap , axis_name="""batch""" )
def _a ( _snake_case , _snake_case , **_snake_case ):
    """pmapped training step: grad of the NQ loss, pmean across devices, apply update.

    NOTE(review): obfuscation gave this duplicate ``_snake_case`` parameters
    (a SyntaxError) — per the body the originals were (state, drp_rng,
    **model_inputs) — and renamed every assignment target to
    ``UpperCAmelCase`` while uses keep the original names (outputs, grad_fn,
    metrics, new_drp_rng, ...).
    """

    def loss_fn(_snake_case ):
        # Pop the labels so only real model kwargs remain, run the forward
        # pass with dropout, and score the three heads via state.loss_fn.
        UpperCAmelCase = model_inputs.pop("""start_labels""" )
        UpperCAmelCase = model_inputs.pop("""end_labels""" )
        UpperCAmelCase = model_inputs.pop("""pooled_labels""" )

        UpperCAmelCase = state.apply_fn(**_snake_case , params=_snake_case , dropout_rng=_snake_case , train=_snake_case )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = outputs

        return state.loss_fn(
            _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , )

    # Split the dropout rng so the next step gets a fresh key.
    UpperCAmelCase , UpperCAmelCase = jax.random.split(_snake_case )
    UpperCAmelCase = jax.value_and_grad(_snake_case )
    UpperCAmelCase , UpperCAmelCase = grad_fn(state.params )
    # Average loss and grads across the "batch" pmap axis before updating.
    UpperCAmelCase = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    UpperCAmelCase = jax.lax.pmean(_snake_case , """batch""" )

    UpperCAmelCase = state.apply_gradients(grads=_snake_case )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def _a ( _snake_case , **_snake_case ):
    """pmapped evaluation step: forward pass without dropout, pmean of the loss.

    NOTE(review): same obfuscation breakage as the training step above —
    originals per the body were (state, **model_inputs), and the mangled
    assignment targets should be the popped labels, outputs, and metrics.
    """
    UpperCAmelCase = model_inputs.pop("""start_labels""" )
    UpperCAmelCase = model_inputs.pop("""end_labels""" )
    UpperCAmelCase = model_inputs.pop("""pooled_labels""" )

    UpperCAmelCase = state.apply_fn(**_snake_case , params=state.params , train=_snake_case )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = outputs

    UpperCAmelCase = state.loss_fn(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
    UpperCAmelCase = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    return metrics
class lowerCamelCase__ ( train_state.TrainState ):
    # Extends flax's TrainState with a non-pytree field carrying the loss
    # callable (read as ``state.loss_fn`` in the train/val steps above).
    # NOTE(review): the field name and the ``pytree_node=snake_case`` value
    # were mangled by obfuscation (the latter was presumably False).
    SCREAMING_SNAKE_CASE = struct.field(pytree_node=snake_case )
@dataclass
class lowerCamelCase__ :
    """Training driver: builds the replicated TrainState, runs epochs, evaluates,
    and writes checkpoints (model params, optimizer state, args, collator).

    NOTE(review): obfuscation collapsed all dataclass fields to
    ``SCREAMING_SNAKE_CASE`` and every method name to ``_UpperCamelCase``
    (duplicates shadow each other), and renamed assignment targets to
    ``UpperCAmelCase`` while uses keep original names (state, args, lr, ...).
    Per the reads below the fields include args, data_collator,
    train_step_fn, val_step_fn, model_save_fn, logger, scheduler_fn.
    """

    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = None

    def _UpperCamelCase ( self ,A ,A ,A ,A=None ):
        """Create (or restore from ckpt_dir) the TrainState and replicate it across devices."""
        UpperCAmelCase = model.params
        UpperCAmelCase = TrainState.create(
            apply_fn=model.__call__ ,params=A ,tx=A ,loss_fn=A ,)
        if ckpt_dir is not None:
            # Resume: load params/opt state/step and rebuild the optimizer so
            # the learning-rate schedule continues from the restored step.
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = restore_checkpoint(A ,A )
            UpperCAmelCase = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            UpperCAmelCase , UpperCAmelCase = build_tx(**A )
            UpperCAmelCase = train_state.TrainState(
                step=A ,apply_fn=model.__call__ ,params=A ,tx=A ,opt_state=A ,)
            UpperCAmelCase = args
            UpperCAmelCase = data_collator
            UpperCAmelCase = lr
            UpperCAmelCase = params
        UpperCAmelCase = jax_utils.replicate(A )
        return state

    def _UpperCamelCase ( self ,A ,A ,A ):
        """Run the full training loop with periodic logging, eval, and checkpointing."""
        UpperCAmelCase = self.args
        UpperCAmelCase = len(A ) // args.batch_size
        UpperCAmelCase = jax.random.PRNGKey(0 )
        # One dropout rng per local device for the pmapped train step.
        UpperCAmelCase = jax.random.split(A ,jax.device_count() )
        for epoch in range(args.max_epochs ):
            UpperCAmelCase = jnp.array(0 ,dtype=jnp.floataa )
            UpperCAmelCase = get_batched_dataset(A ,args.batch_size ,seed=A )
            UpperCAmelCase = 0
            for batch in tqdm(A ,total=A ,desc=F'''Running EPOCH-{epoch}''' ):
                UpperCAmelCase = self.data_collator(A )
                UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.train_step_fn(A ,A ,**A )
                running_loss += jax_utils.unreplicate(metrics["""loss"""] )
                i += 1
                if i % args.logging_steps == 0:
                    # Log averaged train loss, current lr, and a fresh eval loss.
                    UpperCAmelCase = jax_utils.unreplicate(state.step )
                    UpperCAmelCase = running_loss.item() / i
                    UpperCAmelCase = self.scheduler_fn(state_step - 1 )

                    UpperCAmelCase = self.evaluate(A ,A )
                    UpperCAmelCase = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(A ) )
                    self.logger.log(A ,commit=A )

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' ,state=A )

    def _UpperCamelCase ( self ,A ,A ):
        """Average the validation loss over all full batches of the dataset."""
        UpperCAmelCase = get_batched_dataset(A ,self.args.batch_size )
        UpperCAmelCase = len(A ) // self.args.batch_size
        UpperCAmelCase = jnp.array(0 ,dtype=jnp.floataa )
        UpperCAmelCase = 0
        for batch in tqdm(A ,total=A ,desc="""Evaluating ... """ ):
            UpperCAmelCase = self.data_collator(A )
            UpperCAmelCase = self.val_step_fn(A ,**A )
            running_loss += jax_utils.unreplicate(metrics["""loss"""] )
            i += 1
        return running_loss / i

    def _UpperCamelCase ( self ,A ,A ):
        """Unreplicate the state and persist model, optimizer state, args, collator, and step."""
        UpperCAmelCase = jax_utils.unreplicate(A )
        print(F'''SAVING CHECKPOINT IN {save_dir}''' ,end=""" ... """ )
        self.model_save_fn(A ,params=state.params )
        with open(os.path.join(A ,"""opt_state.msgpack""" ) ,"""wb""" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args ,os.path.join(A ,"""args.joblib""" ) )
        joblib.dump(self.data_collator ,os.path.join(A ,"""data_collator.joblib""" ) )
        with open(os.path.join(A ,"""training_state.json""" ) ,"""w""" ) as f:
            json.dump({"""step""": state.step.item()} ,A )
        print("""DONE""" )
print("""DONE""" )
def restore_checkpoint(save_dir, state):
    """Restore a checkpoint written by Trainer.save_checkpoint.

    Reads the serialized model params and optimizer state (msgpack, decoded
    against `state`'s structures), the pickled args and data collator, and
    the step counter from `training_state.json`.

    Returns: (params, opt_state, step, args, data_collator).

    NOTE(review): the obfuscated original named this ``_a``; the name is
    restored from the Trainer's call site (`restore_checkpoint(...)`), the
    locals from their uses.
    """
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay to 1e-7.

    The two linear schedules are joined at `warmup_steps`; the decay phase
    spans the remaining `num_train_steps - warmup_steps` steps.

    NOTE(review): the obfuscated original named this ``_a`` with duplicated
    ``_snake_case`` parameters (a SyntaxError); the name and parameters are
    restored from build_tx's call site and the lr-dict keys used by the
    Trainer.
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer with a warmup+decay schedule and a decay mask.

    Returns (tx, lr_schedule). Bias and LayerNorm-scale parameters are
    excluded from weight decay via the mask.

    NOTE(review): the obfuscated original named this ``_a``; the name and
    parameter names are restored from the Trainer's call site
    (``build_tx(**{"lr": ..., "init_lr": ..., ...})``).
    """

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # NOTE(review): the comparison uses v (the leaf value) rather than k
        # (the flattened key tuple); kept exactly as in the source, but it
        # looks like it may intend k[-1] / k[-2:] -- confirm before relying
        # on this mask.
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 74 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
# Package version string (exposed pre-imports so the checks below can run
# before anything heavy loads). NOTE(review): the target name was mangled
# to _UpperCamelCase; it was presumably __version__.
_UpperCamelCase = """2.13.1"""

import platform
import pyarrow
from packaging import version

# Hard interpreter floor: `datasets` relies on Python >= 3.7 features.
if version.parse(platform.python_version()) < version.parse("""3.7"""):
    raise ImportWarning(
        """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
    )

# pyarrow is the storage backend; older majors lack required APIs.
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
        """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
    )

# Keep the package namespace clean after the checks.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Deprecated top-level aliases kept for backward compatibility.
# NOTE(review): obfuscation collapsed all seven alias names to
# _UpperCamelCase, so only the last assignment survives; the originals were
# distinct deprecated names for these objects.
_UpperCamelCase = concatenate_datasets
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadManager
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadManager

# Drop the private module handles imported above for the deprecation shims.
del _arrow_dataset, _utils, _deprecated_download_manager
| 74 | 1 |
"""simple docstring"""
def z_function(input_str):
    """
    Compute the Z-array of ``input_str``.

    z_result[i] is the length of the longest substring starting at position i
    that is also a prefix of the whole string (z_result[0] is left as 0).
    Runs in O(len(input_str)) using the left/right interval technique.
    """
    z_result = [0 for _ in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the [left_pointer, right_pointer] interval:
        # reuse the already-computed value from the mirrored position
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        # extend the match character by character past the known interval
        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us a farther-right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i, z_result, s):
    """Return True if the prefix match at position ``i`` can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    """Count the occurrences of ``pattern`` inside ``input_str`` via the Z-array."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and compute the z-array
    # of the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1

    return answer


_a = find_pattern  # legacy alias for the obfuscated name
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 74 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Map of pretrained ERNIE-M checkpoint identifiers to the URLs of their hosted
# configuration files.
_UpperCamelCase = {
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class ErnieMConfig(PretrainedConfig):
    """
    Configuration class for ERNIE-M models.

    Stores the hyper-parameters used to instantiate an ERNIE-M model; defaults
    mirror the `susnato/ernie-m-base_pytorch` architecture. Parameter names and
    defaults were reconstructed from the assignment order of the original
    (obfuscated) constructor body.
    """

    model_type = "ernie_m"
    # `dropout`/`num_classes` are accepted as aliases of the canonical names.
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout


lowerCamelCase__ = ErnieMConfig  # keep the original (obfuscated) binding alive
| 74 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger used throughout main() below.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

# All config classes that support masked image modeling, and their model types
# (used to validate/describe the --model_type CLI option).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be
    able to specify them on the command line.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping consumed by `load_dataset` in main();
        # left as None when neither folder is given so the hub dataset is used.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            # NOTE(review): key name "val" restored from the upstream example
            # script — confirm against the dataset-loading convention used.
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


lowerCamelCase__ = DataTrainingArguments  # keep the original (obfuscated) binding alive
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


lowerCamelCase__ = ModelArguments  # keep the original (obfuscated) binding alive
class MaskGenerator:
    """
    Generates boolean patch masks for SimMIM-style masked image modeling.

    Each call returns a flat tensor of 0/1 values (1 = masked) over the grid of
    model patches; masking is decided at the coarser `mask_patch_size`
    granularity and then upsampled to `model_patch_size` resolution.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""")

        # Grid of coarse mask patches, and how many model patches each covers.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick `mask_count` coarse patches uniformly at random.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        # Upsample the coarse mask to model-patch resolution.
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


lowerCamelCase__ = MaskGenerator  # keep the original (obfuscated) binding alive
def collate_fn(examples):
    """Stack per-example pixel values and boolean patch masks into a model-ready batch."""
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples])
    mask = torch.stack([example["""mask"""] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


_a = collate_fn  # legacy alias for the obfuscated name
def main():
    """
    Pre-train a masked-image-modeling (SimMIM-style) model.

    Parses model/data/training arguments, loads and splits the dataset, builds
    the config, image processor, mask generator and model, then runs the
    Trainer for training/evaluation and optionally pushes to the Hub.
    """
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config: fall back to the configuration's values when not given on the CLI
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms and creating a patch mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
| 74 | 1 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module logger; the feature extractor below reports missing-sampling-rate warnings through it.
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """
    SpeechT5 feature extractor.

    Normalizes raw waveforms for the encoder (`audio`) and extracts log-mel
    filterbank features for the decoder targets (`audio_target`). Parameter
    names were reconstructed from the attribute assignments of the original
    (obfuscated) constructor.
    """

    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # Window/hop sizes in samples (hop_length/win_length are in milliseconds).
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize every array in the list to zero mean and unit variance (padding positions get `padding_value`)."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        """Extract log-mel filterbank features for one (unbatched) waveform; returns (frames, num_mel_bins)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """
        Featurize waveform inputs (`audio`) and/or spectrogram targets
        (`audio_target`). When both are given, the target features are attached
        to the inputs as `labels`/`decoder_attention_mask`.
        """
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        """Shared featurize-and-pad path for both waveform inputs and mel targets."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # restore the feature size adjusted above
        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        """Serialize the config, dropping attributes derived from the other properties."""
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output


lowerCamelCase__ = SpeechT5FeatureExtractor  # keep the original (obfuscated) binding alive
| 74 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp

# Native AMP (torch.cuda.amp) is only available for torch >= 1.6.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

# Module logger configured by configure_logger() below.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pre-train from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # NOTE(review): boolean defaults restored from the upstream example script — confirm.
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


lowerCamelCase__ = ModelArguments  # keep the original (obfuscated) binding alive
def configure_logger(model_args, training_args):
    """
    Configure the module logger from the parsed CLI arguments.

    DEBUG when --verbose_logging is set, INFO on the main process, and WARNING
    on all other (distributed) ranks.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


_a = configure_logger  # legacy alias for the obfuscated name
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be
    able to specify them on the command line.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


lowerCamelCase__ = DataTrainingArguments  # keep the original (obfuscated) binding alive
@dataclass
class DataCollatorForWavaVecaPretraining:
    """
    Data collator that dynamically pads the received inputs and samples the
    masked time indices required for wav2vec2 self-supervised pretraining.
    """

    # String annotations keep the dataclass importable without resolving the
    # model/feature-extractor classes at class-creation time.
    model: "WavaVecaForPreTraining"
    feature_extractor: "WavaVecaFeatureExtractor"
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        # NOTE(review): `attention_mask` is only bound inside this branch; the
        # original exhibits the same structure — confirm callers always pad.
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch


lowerCamelCase__ = DataCollatorForWavaVecaPretraining  # keep the original (obfuscated) binding alive
class WavaVecaPreTrainer(Trainer):
    """
    Trainer subclass that handles the Gumbel-softmax temperature annealing used
    during wav2vec2 pretraining, in addition to the usual training step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """
        Perform a training step: forward, loss scaling/reduction, backward, and
        Gumbel temperature decay. Returns the detached loss tensor.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed (never below the minimum)
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


lowerCamelCase__ = WavaVecaPreTrainer  # keep the original (obfuscated) binding alive
def main():
    """Entry point for Wav2Vec2 pretraining.

    Parses model/data/training arguments, loads the audio dataset (creating a
    validation split if missing), preprocesses and normalizes the audio, and
    runs the Gumbel-temperature-annealed pretraining loop via the trainer.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain: carve the
        # validation split off the front of the train split by percentage
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # load the audio file; librosa resamples everything to the extractor's rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        # transform raw speech arrays into normalized `BatchFeature`s
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 74 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs and random inputs for the tests below.

    Named ``OpenLlamaModelTester`` because the test class instantiates it
    under exactly that name in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): is_decoder/use_stable_embedding flags were lost in the
        # obfuscated original; False/True match the upstream OpenLlama test.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite, generation and pipeline tests for the OpenLlama models.

    Bases restored to the mixins imported at the top of the file; method
    names restored to ``test_*`` so unittest discovery actually runs them.
    """

    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 74 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    """Builds a tiny TransfoXL config and random inputs for the TF tests.

    Named to match the instantiation in the test class's ``setUp``; method
    names match the ``self.model_tester.*`` call sites below.
    """

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        """Return (config, input_ids_1, input_ids_2, lm_labels)."""
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        # seed both python and TF RNGs so model init is reproducible
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite and pipeline tests for the TF TransfoXL models.

    Bases restored to the mixins imported at the top of the file; method
    names restored to ``test_*``/``setUp`` so unittest discovery runs them.
    """

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    # NOTE(review): flag names below were lost in the obfuscated original and
    # restored to match the upstream TF TransfoXL test — confirm against it.
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # only the sequence-classification head exposes output embeddings
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLMHeadTest(unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained wt103 LM."""

    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        # NOTE(review): dtype was the nonexistent `tf.intaa` in the original;
        # `tf.int32` matches the upstream integration test — confirm.
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        # Prompt: the opening of the wt103 "Rasputin" passage
        # (In 1991, the remains of Russian Tsar Nicholas II and his family ...
        #  ... begging for his blessing. <eod> </s> <eos>)
        # fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # Expected continuation: the prompt followed by the model restating the
        # opening sentence ("In the 1990s, the remains of Russian Tsar ...").
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 74 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=snake_case ):
SCREAMING_SNAKE_CASE = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self ,*A ,**A ):
requires_backends(self ,["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _UpperCamelCase ( cls ,*A ,**A ):
requires_backends(cls ,["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _UpperCamelCase ( cls ,*A ,**A ):
requires_backends(cls ,["""transformers""", """torch""", """note_seq"""] )
| 74 |
"""simple docstring"""
from math import sqrt
def solution(limit=100_0000):
    """Project Euler 86: least cuboid size M whose cumulative count of
    integer-shortest-path cuboids first exceeds ``limit``.

    For each candidate longest side ``max_cuboid_size`` (= M), the sum of the
    two shorter sides ranges over 2..2M; when the unfolded diagonal
    sqrt(sum**2 + M**2) is an integer, every split of the sum into two valid
    sides contributes one cuboid.

    Renamed from the obfuscated ``_a`` so the ``__main__`` guard's call to
    ``solution()`` resolves.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # number of (a, b) splits of the sum with 1 <= a <= b <= M
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 74 | 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the standard BLIP demo image and preprocess it.

    Returns a (1, 3, image_size, image_size) tensor on ``device``, resized
    with bicubic interpolation and normalized with the CLIP mean/std.

    Renamed from the obfuscated ``_a`` (with duplicate parameter names) to
    match the ``load_demo_image(image_size=..., device=...)`` call site.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Translate one original-BLIP state-dict key into the HF naming scheme.

    Applies a fixed sequence of substring substitutions (visual encoder →
    vision_model.encoder, blocks → layers, attn → self_attn, ...). Renamed
    from the obfuscated ``_a`` to match the ``rename_key(...)`` call sites,
    and the parameter renamed so the body's references to ``key`` resolve.
    """
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the original BLIP captioning, VQA and ITM checkpoints to HF.

    Downloads each original checkpoint, renames its state-dict keys via
    ``rename_key``, loads it into the corresponding HF model, sanity-checks
    the outputs on the demo image, and optionally saves the converted models
    under ``pytorch_dump_folder_path`` (+ "_vqa" / "_itm" suffixes).
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    # sanity-check greedy captioning with and without the prompt
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    # NOTE(review): the original passed a nonexistent `args.checkpoint_path`
    # as the first argument; only these two options are actually defined.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 74 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Mapping of original CLAP key fragments -> HF ClapModel key fragments.
# Named KEYS_TO_MODIFY_MAPPING because rename_state_dict() below reads it
# under exactly that name (the original bound both constants to one name,
# so the dict was immediately clobbered by the next assignment).
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

# NOTE(review): loaded at import time (requires network access); no use of
# this object is visible in this chunk — confirm before renaming further.
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original CLAP (HTSAT-tiny + roberta) model from a checkpoint.

    Renamed from the obfuscated ``_a`` (which had duplicate parameter names,
    a SyntaxError) to match the ``init_clap(...)`` call site below. Returns
    the model and its config dict, on GPU when available.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def _a ( _snake_case ):
    """Rename an original CLAP ``state_dict`` into transformers key layout.

    Applies the substring replacements from ``KEYS_TO_MODIFY_MAPPING``, rewrites
    ``sequential.<n>`` / ``_projection.<n>`` indices, and splits fused audio
    ``qkv`` tensors into separate query/key/value entries.

    Args:
        _snake_case: the original model's state dict.

    Returns:
        A new dict with transformers-compatible keys.
    """
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""
    for key, value in _snake_case.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list; every third module of the
            # original nn.Sequential maps to one transformers layer
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer ) // 3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
        # NOTE: the original read ``if "audio" and "qkv" in key`` which only
        # tested the "qkv" half; both substrings must be present.
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""" , """query""" )] = query_layer
            model_state_dict[key.replace("""qkv""" , """key""" )] = key_layer
            model_state_dict[key.replace("""qkv""" , """value""" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def _a ( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """Convert an original CLAP checkpoint into a transformers ``ClapModel``.

    Args:
        checkpoint_path: path to the original CLAP checkpoint.
        pytorch_dump_folder_path: where to save the converted model and config.
        config_path: unused here; a fresh default ``ClapConfig`` is built.
        enable_fusion: whether the checkpoint is the fusion variant.
    """
    # The original declared four parameters all named ``_snake_case`` (a
    # SyntaxError); names below follow the positional call in the main guard.
    # NOTE(review): ``init_clap``/``rename_state_dict`` are called here as in
    # the original, although the definitions above carry the mangled name
    # ``_a`` — confirm the intended names against upstream.
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    # presumably the flag lives on the audio sub-config — TODO confirm upstream
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Original bound the parser and parsed args to a throwaway name and then
    # read undefined ``parser``/``args``; restore the intended locals.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
    """Fast tests for the Kandinsky 2.2 image-to-image pipeline.

    NOTE(review): this block is heavily name-mangled. All properties/methods
    share the name ``_UpperCamelCase`` (later definitions shadow earlier ones),
    the class attributes all rebind ``SCREAMING_SNAKE_CASE``, and several
    bodies assign to a throwaway ``UpperCAmelCase`` and then read undefined
    names (``A``, ``model``, ``unet``, ``seed``, ...). The class cannot run as
    written; comments below record the apparent intent only.
    """

    # apparently: pipeline_class / params / batch_params / callable kwargs
    SCREAMING_SNAKE_CASE = KandinskyVaaImgaImgPipeline
    SCREAMING_SNAKE_CASE = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    SCREAMING_SNAKE_CASE = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    SCREAMING_SNAKE_CASE = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    SCREAMING_SNAKE_CASE = False

    # The next five properties look like tiny-model hyper-parameters; the
    # bodies below read them as ``time_input_dim`` etc., names that are never
    # bound because every property shares the same mangled identifier.
    @property
    def _UpperCamelCase ( self ):
        return 32

    @property
    def _UpperCamelCase ( self ):
        return 32

    @property
    def _UpperCamelCase ( self ):
        # reads ``self.time_input_dim``, which is never defined under that name
        return self.time_input_dim

    @property
    def _UpperCamelCase ( self ):
        return self.time_input_dim * 4

    @property
    def _UpperCamelCase ( self ):
        return 100

    @property
    def _UpperCamelCase ( self ):
        # apparently: build a tiny UNet for fast tests; the kwargs dict is
        # bound to a throwaway name and ``A``/``model`` are undefined here.
        torch.manual_seed(0 )
        UpperCAmelCase = {
            """in_channels""": 4,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        UpperCAmelCase = UNetaDConditionModel(**A )
        return model

    @property
    def _UpperCamelCase ( self ):
        # apparently: kwargs for a tiny VQ (movq) model
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def _UpperCamelCase ( self ):
        torch.manual_seed(0 )
        UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
        return model

    def _UpperCamelCase ( self ):
        # apparently get_dummy_components(): unet + DDIM scheduler + movq
        UpperCAmelCase = self.dummy_unet
        UpperCAmelCase = self.dummy_movq
        UpperCAmelCase = {
            """num_train_timesteps""": 1_000,
            """beta_schedule""": """linear""",
            """beta_start""": 0.00085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        UpperCAmelCase = DDIMScheduler(**A )
        UpperCAmelCase = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def _UpperCamelCase ( self ,A ,A=0 ):
        # apparently get_dummy_inputs(device, seed=0); the duplicated
        # parameter name ``A`` is a SyntaxError as written.
        UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A )
        UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
            A )
        # create init_image
        UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(A ) ).to(A )
        UpperCAmelCase = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        UpperCAmelCase = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((256, 256) )
        if str(A ).startswith("""mps""" ):
            UpperCAmelCase = torch.manual_seed(A )
        else:
            UpperCAmelCase = torch.Generator(device=A ).manual_seed(A )
        UpperCAmelCase = {
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 10,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs

    def _UpperCamelCase ( self ):
        # apparently the main fast test: run the pipeline on CPU and compare a
        # 3x3 corner slice of the output against reference values.
        UpperCAmelCase = """cpu"""
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**A )
        UpperCAmelCase = pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        UpperCAmelCase = pipe(**self.get_dummy_inputs(A ) )
        UpperCAmelCase = output.images
        UpperCAmelCase = pipe(
            **self.get_dummy_inputs(A ) ,return_dict=A ,)[0]
        UpperCAmelCase = image[0, -3:, -3:, -1]
        UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 image-to-image pipeline.

    NOTE(review): both methods below share the mangled name ``_UpperCamelCase``,
    so only the second definition is reachable and the VRAM cleanup in the
    first is effectively dead. Restoring distinct names would change the
    class interface, so the names are kept as-is.
    """

    def _UpperCamelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _UpperCamelCase ( self ):
        # The original bound every intermediate to one throwaway name and then
        # referenced undefined identifiers; restore the intended locals. The
        # mangled dtype ``torch.floataa`` is taken to be fp16 here (fp16 GPU
        # inference) — TODO confirm against upstream.
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        prompt = """A red cartoon frog, 4k"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" ,torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt ,generator=generator ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
        output = pipeline(
            image=init_image ,image_embeds=image_emb ,negative_image_embeds=zero_image_emb ,generator=generator ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type="""np""" ,)
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image ,expected_image )
| 74 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# The original assigned the repo src path to a throwaway name and then read
# the undefined ``git_repo_path``; restore the intended name.
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _a ( _snake_case ):
    """Register the shared diffusers command-line options on the pytest parser.

    Args:
        _snake_case: the pytest option parser passed to ``pytest_addoption``.
    """
    # Imported lazily so that collecting this conftest does not require
    # diffusers at import time.
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_snake_case )
def _a ( _snake_case ):
    """Emit the optional test report files at the end of the pytest session.

    Args:
        _snake_case: the pytest ``terminalreporter`` plugin object.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    # The original read the option from an undefined ``terminalreporter`` name
    # and stored it into a throwaway variable before testing the undefined
    # ``make_reports``; the reporter is this function's own argument and the
    # option value both gates and names the report generation.
    make_reports = _snake_case.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(_snake_case , id=make_reports )
| 74 | 1 |
"""simple docstring"""
import os
def _a ( ):
"""simple docstring"""
UpperCAmelCase = os.path.join(os.path.dirname(_snake_case ) , """num.txt""" )
with open(_snake_case ) as file_hand:
return str(sum(int(_snake_case ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 74 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class lowerCamelCase__ :
    """A univariate polynomial with real coefficients.

    ``coefficients[i]`` is the coefficient of ``x**i``.

    NOTE(review): the original block was name-mangled — duplicated ``A``
    parameters (a SyntaxError), attribute/local assignments to throwaway
    names, and references to the undefined class name ``Polynomial``. The
    evaluation, derivative and integral methods all still share the mangled
    name ``_UpperCamelCase`` (only the last, the integral, is reachable);
    renaming them would change the public interface, so they are kept.
    """

    def __init__( self ,degree ,coefficients ):
        # Require exactly degree + 1 coefficients (c_0 .. c_degree).
        if len(coefficients ) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients )
        self.degree = degree

    def __add__( self ,polynomial_a ):
        # Add coefficient-wise, extending the shorter polynomial with zeros.
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return lowerCamelCase__(self.degree ,coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return lowerCamelCase__(polynomial_a.degree ,coefficients )

    def __sub__( self ,polynomial_a ):
        # a - b == a + (-1) * b
        return self + polynomial_a * lowerCamelCase__(0 ,[-1] )

    def __neg__( self ):
        return lowerCamelCase__(self.degree ,[-c for c in self.coefficients] )

    def __mul__( self ,polynomial_a ):
        # Schoolbook convolution of the coefficient vectors.
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return lowerCamelCase__(self.degree + polynomial_a.degree ,coefficients )

    def _UpperCamelCase ( self ,substitution ):
        # Evaluate the polynomial at ``substitution`` (shadowed by the
        # same-named methods below — see class NOTE).
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__( self ):
        # Render highest-degree term first, skipping zero coefficients.
        polynomial = """"""
        for i in range(self.degree ,-1 ,-1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial

    def __repr__( self ):
        return self.__str__()

    def _UpperCamelCase ( self ):
        # Derivative: d/dx sum(c_i x^i) = sum((i + 1) c_{i+1} x^i).
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return lowerCamelCase__(self.degree - 1 ,coefficients )

    def _UpperCamelCase ( self ,constant = 0 ):
        # Antiderivative with integration constant ``constant``.
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return lowerCamelCase__(self.degree + 1 ,coefficients )

    def __eq__( self ,polynomial_a ):
        # Equal iff same degree and identical coefficient lists.
        if not isinstance(polynomial_a ,lowerCamelCase__ ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__( self ,polynomial_a ):
        return not self.__eq__(polynomial_a )
| 74 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
    """Helper that builds tiny Albert configs/inputs and checks every head.

    NOTE(review): name-mangled block. ``__init__`` declares many duplicated
    ``A`` parameters (a SyntaxError as written), its body assigns to a
    throwaway ``UpperCAmelCase`` instead of ``self.<attr>``, and every method
    shares the name ``_UpperCamelCase`` (later definitions shadow earlier
    ones) while reading names (``parent``, ``batch_size``, ...) that are
    never bound. The class cannot run as written; comments record the
    apparent intent only.
    """

    def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=True ,A=True ,A=99 ,A=16 ,A=36 ,A=6 ,A=6 ,A=6 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
        # apparently: store every hyper-parameter on ``self``
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = embedding_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_hidden_groups
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope

    def _UpperCamelCase ( self ):
        # apparently prepare_config_and_inputs(): random ids/masks/labels
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
        UpperCAmelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCamelCase ( self ):
        # apparently get_config(): a tiny AlbertConfig from the stored params
        return AlbertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,num_hidden_groups=self.num_hidden_groups ,)

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_model()
        UpperCAmelCase = AlbertModel(config=A )
        model.to(A )
        model.eval()
        UpperCAmelCase = model(A ,attention_mask=A ,token_type_ids=A )
        UpperCAmelCase = model(A ,token_type_ids=A )
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_for_pretraining()
        UpperCAmelCase = AlbertForPreTraining(config=A )
        model.to(A )
        model.eval()
        UpperCAmelCase = model(
            A ,attention_mask=A ,token_type_ids=A ,labels=A ,sentence_order_label=A ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape ,(self.batch_size, config.num_labels) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_for_masked_lm()
        UpperCAmelCase = AlbertForMaskedLM(config=A )
        model.to(A )
        model.eval()
        UpperCAmelCase = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_for_question_answering()
        UpperCAmelCase = AlbertForQuestionAnswering(config=A )
        model.to(A )
        model.eval()
        UpperCAmelCase = model(
            A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_for_sequence_classification()
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = AlbertForSequenceClassification(A )
        model.to(A )
        model.eval()
        UpperCAmelCase = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_for_token_classification()
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = AlbertForTokenClassification(config=A )
        model.to(A )
        model.eval()
        UpperCAmelCase = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ):
        # apparently create_and_check_for_multiple_choice()
        UpperCAmelCase = self.num_choices
        UpperCAmelCase = AlbertForMultipleChoice(config=A )
        model.to(A )
        model.eval()
        UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        UpperCAmelCase = model(
            A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )

    def _UpperCamelCase ( self ):
        # apparently prepare_config_and_inputs_for_common()
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs
        UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """ModelTesterMixin / PipelineTesterMixin suite for Albert.

    NOTE(review): name-mangled block — the class attributes all rebind
    ``SCREAMING_SNAKE_CASE``, every test method is named ``_UpperCamelCase``
    (so only the final one survives on the class), and several bodies assign
    to a throwaway ``UpperCAmelCase`` and then read undefined names.
    Comments record the apparent intent only.
    """

    # apparently: all_model_classes
    SCREAMING_SNAKE_CASE = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # apparently: pipeline_model_mapping
    SCREAMING_SNAKE_CASE = (
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE = True

    def _UpperCamelCase ( self ,A ,A ,A=False ):
        # apparently _prepare_for_class(inputs_dict, model_class, return_labels):
        # add dummy label tensors for pretraining-style models
        UpperCAmelCase = super()._prepare_for_class(A ,A ,return_labels=A )
        if return_labels:
            if model_class in get_values(A ):
                UpperCAmelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=A )
                UpperCAmelCase = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=A )
        return inputs_dict

    def _UpperCamelCase ( self ):
        # apparently setUp()
        UpperCAmelCase = AlbertModelTester(self )
        UpperCAmelCase = ConfigTester(self ,config_class=A ,hidden_size=37 )

    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A )

    def _UpperCamelCase ( self ):
        # apparently: re-run the model check for each position-embedding type
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase = type
            self.model_tester.create_and_check_model(*A )

    @slow
    def _UpperCamelCase ( self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = AlbertModel.from_pretrained(A )
            self.assertIsNotNone(A )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration check: run ``albert-base-v2`` on a fixed input and
    compare a slice of the last hidden states against reference values."""

    @slow
    def _UpperCamelCase ( self ):
        # The original assigned every intermediate to the same mangled
        # throwaway name and then referenced undefined identifiers (``A``,
        # ``output``); restore the intended locals.
        model = AlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) )
| 74 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase__ :
    """Helper that builds tiny TF Flaubert configs/inputs and checks each head.

    NOTE(review): name-mangled block. ``__init__`` assigns every
    hyper-parameter to the same throwaway ``UpperCAmelCase`` instead of
    ``self.<attr>``, and all methods share the name ``_UpperCamelCase``
    (later definitions shadow earlier ones) while reading attributes
    (``self.batch_size``, ``self.vocab_size``, ...) that are never bound.
    The class cannot run as written; comments record the apparent intent only.
    """

    def __init__( self ,A ,):
        # apparently: parent, then batch_size=13, seq_length=7, the
        # use_*/gelu/sinusoidal/causal/asm flags, n_langs=2, vocab_size=99,
        # n_special=0, hidden sizes, dropouts, summary_type="last",
        # use_proj=True, scope=None, bos_token_id=0
        UpperCAmelCase = parent
        UpperCAmelCase = 13
        UpperCAmelCase = 7
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = False
        UpperCAmelCase = False
        UpperCAmelCase = False
        UpperCAmelCase = 2
        UpperCAmelCase = 99
        UpperCAmelCase = 0
        UpperCAmelCase = 32
        UpperCAmelCase = 2
        UpperCAmelCase = 4
        UpperCAmelCase = 0.1
        UpperCAmelCase = 0.1
        UpperCAmelCase = 512
        UpperCAmelCase = 16
        UpperCAmelCase = 2
        UpperCAmelCase = 0.02
        UpperCAmelCase = 3
        UpperCAmelCase = 4
        UpperCAmelCase = """last"""
        UpperCAmelCase = True
        UpperCAmelCase = None
        UpperCAmelCase = 0

    def _UpperCamelCase ( self ):
        # apparently prepare_config_and_inputs(): random ids/masks/labels plus
        # a tiny FlaubertConfig
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
        UpperCAmelCase = None
        if self.use_input_lengths:
            UpperCAmelCase = (
                ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
            UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
        UpperCAmelCase = FlaubertConfig(
            vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
        # apparently create_and_check_flaubert_model()
        UpperCAmelCase = TFFlaubertModel(config=A )
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        UpperCAmelCase = model(A )
        UpperCAmelCase = [input_ids, input_mask]
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
        # apparently create_and_check_flaubert_lm_head()
        UpperCAmelCase = TFFlaubertWithLMHeadModel(A )
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
        # apparently create_and_check_flaubert_qa()
        UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A )
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
        # apparently create_and_check_flaubert_sequence_classif()
        UpperCAmelCase = TFFlaubertForSequenceClassification(A )
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
        # apparently create_and_check_flaubert_for_token_classification()
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = TFFlaubertForTokenClassification(config=A )
        UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
        # apparently create_and_check_flaubert_for_multiple_choice()
        UpperCAmelCase = self.num_choices
        UpperCAmelCase = TFFlaubertForMultipleChoice(config=A )
        UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
        UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
        UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
        UpperCAmelCase = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )

    def _UpperCamelCase ( self ):
        # apparently prepare_config_and_inputs_for_common()
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs
        UpperCAmelCase = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """TFModelTesterMixin / PipelineTesterMixin suite for TF Flaubert.

    NOTE(review): name-mangled block — the class attributes all rebind
    ``SCREAMING_SNAKE_CASE``, every test method is named ``_UpperCamelCase``
    (so only the final one survives on the class), and several bodies assign
    to a throwaway ``UpperCAmelCase`` and then read undefined names.
    Comments record the apparent intent only.
    """

    # apparently: all_model_classes / all_generative_model_classes /
    # pipeline_model_mapping / test flags
    SCREAMING_SNAKE_CASE = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    SCREAMING_SNAKE_CASE = (
        {
            '''feature-extraction''': TFFlaubertModel,
            '''fill-mask''': TFFlaubertWithLMHeadModel,
            '''question-answering''': TFFlaubertForQuestionAnsweringSimple,
            '''text-classification''': TFFlaubertForSequenceClassification,
            '''token-classification''': TFFlaubertForTokenClassification,
            '''zero-shot''': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
        # apparently is_pipeline_test_to_skip(); note the body reads
        # ``pipeline_test_casse_name``/``tokenizer_name`` although every
        # parameter is named ``A``
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _UpperCamelCase ( self ):
        # apparently setUp()
        UpperCAmelCase = TFFlaubertModelTester(self )
        UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 )

    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*A )

    @slow
    def _UpperCamelCase ( self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = TFFlaubertModel.from_pretrained(A )
            self.assertIsNotNone(A )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration check: run ``jplu/tf-flaubert-small-cased`` on a fixed
    sentence and compare a hidden-state slice against reference values."""

    @slow
    def _UpperCamelCase ( self ):
        # The original bound every intermediate to the same throwaway name and
        # used the non-existent dtypes ``tf.intaa``/``tf.floataa``; token ids
        # are int32 and activations float32 (int64 also plausible for the ids
        # — TODO confirm against upstream).
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.int32 ,)  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] ,dtype=tf.float32 ,)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 74 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT (slow and fast tokenizers).

    NOTE(review): the original class had an undefined base ``snake_case``,
    every class attribute was named ``SCREAMING_SNAKE_CASE`` (so earlier ones
    were overwritten), every method was named ``_UpperCamelCase`` (so only the
    last survived and unittest ran nothing), and locals were bound to the
    throwaway name ``UpperCAmelCase`` while later lines read undefined names.
    Names below are reconstructed from the upstream BERT/MobileBERT tokenizer
    test suite.
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocabulary used by the from-file tests.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, expected-detokenized) text pair for the common tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        # NOTE(review): the original called `tokenize(A)` with an undefined `A`
        # instead of the loop variable `t`.
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 74 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the original bound the logger to `_UpperCamelCase`, leaving the
# later `logger.info(...)` calls in this script referencing an undefined name.
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map original GLPN checkpoint keys onto the HuggingFace naming scheme.

    NOTE(review): the original function was named ``_a`` (while the call site
    uses ``rename_keys``) and every rewritten key was bound to the throwaway
    name ``UpperCAmelCase``, so the returned dict was never populated; the
    ``key``/``idx``/``new_state_dict`` bindings are restored.

    Args:
        state_dict: mapping of original parameter names to tensors.

    Returns:
        ``OrderedDict`` with renamed keys, preserving iteration order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(F'''patch_embed{idx}''', F'''patch_embeddings.{int(idx)-1}''')
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(F'''layer_norm{idx}''', F'''layer_norm.{int(idx)-1}''')
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(F'''block{idx}''', F'''block.{int(idx)-1}''')
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(F'''linear_c{idx}''', F'''linear_c.{int(idx)-1}''')
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(config, state_dict):
    """Split each fused key/value ("kv") projection into separate key and value
    entries, in place.

    NOTE(review): the original signature declared two parameters both named
    ``_snake_case`` (a SyntaxError) and every split tensor was bound to the
    throwaway name ``UpperCAmelCase`` instead of being written back into
    ``state_dict``; the target keys are reconstructed from the upstream GLPN
    conversion script.

    Args:
        config: GLPN config providing ``num_encoder_blocks``, ``depths`` and
            ``hidden_sizes``.
        state_dict: renamed checkpoint dict; mutated in place.
    """
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''')
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''')
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO test image used to sanity-check converted models.

    NOTE(review): original was named ``_a`` while the call site uses
    ``prepare_img``, and the URL/image locals were bound to throwaway names.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to the HuggingFace format and
    optionally push it to the hub.

    NOTE(review): the original declared duplicate ``_snake_case`` parameters
    (a SyntaxError) and bound every intermediate to the throwaway name
    ``UpperCAmelCase``; names restored to match the argparse call site.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder (used as hub repo path).
        push_to_hub: whether to upload model + image processor.
        model_name: hub model name; also selects the expected output slice.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(F'''Unknown model name: {model_name}''')
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1E-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # NOTE(review): the parser and the parsed namespace were bound to
    # `_UpperCamelCase`, so the `parser.add_argument` / `args.*` references
    # below were undefined; bindings restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_path""",
        default=None,
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )
    parser.add_argument(
        """--model_name""",
        default="""glpn-kitti""",
        type=str,
        help="""Name of the model in case you're pushing to the hub.""",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 74 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
    raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
# NOTE(review): both module-level names were obfuscated to `_UpperCamelCase`
# (the second overwrote the first); restored to a logger and the reference
# sentence used to compare model outputs.
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy a fairseq XLM-RoBERTa-XL checkpoint into a HuggingFace model and
    verify both produce the same outputs.

    NOTE(review): the original declared three parameters all named
    ``_snake_case`` (a SyntaxError) and bound every assignment target to the
    throwaway name ``UpperCAmelCase``, so no weight was actually copied.  The
    left-hand sides below are reconstructed from the upstream conversion
    script — confirm against transformers'
    convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py.

    Args:
        roberta_checkpoint_path: path to the fairseq dump.
        pytorch_dump_folder_path: output folder for ``save_pretrained``.
        classification_head: convert the "mnli" classification head instead of
            the LM head.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1E-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our RoBERTa config:""", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c RoBERTa doesn't use them.
    # assumes the XL variant applies a final LayerNorm on the encoder — confirm
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate — NOTE(review): the original read `roberta_layer.fca`
        # twice; that is the obfuscated form of fairseq's `fc1`/`fc2`.
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["""mnli"""].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["""mnli"""].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["""mnli"""].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    sample_text = """Hello world! cécé herlolip"""  # kept local so this fix stands alone
    input_ids = roberta.encode(sample_text).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["""mnli"""](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print("""Do both models output the same tensors?""", """🔥""" if success else """💩""")
    if not success:
        raise Exception("""Something went wRoNg""")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were obfuscated to `_UpperCamelCase`,
    # leaving the references below undefined; bindings restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 74 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    """Print the articulation points of an undirected graph.

    NOTE(review): the original function was named ``_a`` (while the call site
    uses ``compute_ap``) and the ``low``/``visited``/``is_art`` arrays were
    bound to the throwaway name ``UpperCAmelCase``; bindings restored per the
    standard articulation-point DFS.

    Args:
        l: adjacency mapping, vertex -> list of neighbour vertices (vertices
            are the integers ``0..n-1``).
    """
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the DFS root is an articulation point only with >1 tree edges
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
# NOTE(review): the dict was bound to `_UpperCamelCase` while the call below
# reads `data`; binding restored.
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 74 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    """Builds a small DistilBert config plus dummy inputs and exercises each
    task head, asserting output shapes.

    NOTE(review): the original class was named ``lowerCamelCase__`` with an
    undefined base ``snake_case``; ``__init__`` bound every argument to the
    throwaway name ``UpperCAmelCase`` instead of ``self.*`` (so the later
    ``self.batch_size`` etc. reads failed), methods shared one obfuscated name,
    and the ``create_and_check_*`` methods declared six parameters all named
    ``A`` (a SyntaxError).  Conventional names restored from the upstream test.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester harness for DistilBert.

    NOTE(review): the original bases were the undefined name ``snake_case``
    (twice); ``ModelTesterMixin`` and ``PipelineTesterMixin`` are restored from
    the file's imports.  The four boolean flags below were all obfuscated to
    one repeated name; their identifiers are reconstructed from the upstream
    test suite — confirm before relying on them.
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': DistilBertModel,
            '''fill-mask''': DistilBertForMaskedLM,
            '''question-answering''': DistilBertForQuestionAnswering,
            '''text-classification''': DistilBertForSequenceClassification,
            '''token-classification''': DistilBertForTokenClassification,
            '''zero-shot''': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
def _UpperCamelCase ( self ):
UpperCAmelCase = DistilBertModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,dim=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = DistilBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@slow
@require_torch_gpu
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
UpperCAmelCase = True
UpperCAmelCase = model_class(config=A )
UpperCAmelCase = self._prepare_for_class(A ,A )
UpperCAmelCase = torch.jit.trace(
A ,(inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A ,os.path.join(A ,"""traced_model.pt""" ) )
UpperCAmelCase = torch.jit.load(os.path.join(A ,"""traced_model.pt""" ) ,map_location=A )
loaded(inputs_dict["""input_ids"""].to(A ) ,inputs_dict["""attention_mask"""].to(A ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test: DistilBERT base outputs vs. known reference values.

    NOTE(review): the mangled original bound the model, the input tensors and
    the output to a single repeated name and passed an undefined ``A`` to the
    forward call; conventional locals are restored.
    """

    @slow
    def _UpperCamelCase(self):
        model = DistilBertModel.from_pretrained("""distilbert-base-uncased""")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # One 768-dim hidden state per input token.
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 74 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_UpperCamelCase = TypeVar("""T""")
class DisjointSetTreeNode(Generic[T]):
    """A node of a disjoint-set forest: payload, parent pointer and rank.

    NOTE(review): the class name is restored because ``make_set`` below
    instantiates ``DisjointSetTreeNode``; the mangled ``__init__`` bound the
    payload to a throwaway local instead of instance attributes.
    """

    def __init__(self, data: T) -> None:
        self.data = data
        # Every node starts as the representative (root) of its own set.
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) forest over hashable payloads, with path
    compression in ``find_set`` and union by rank in ``link``.

    NOTE(review): class/method/attribute names are restored to the ones the
    Kruskal implementation below already calls (``make_set``/``find_set``/
    ``union``); the mangled version gave every method one shared name and
    declared duplicate parameters, which cannot even parse.
    """

    def __init__(self) -> None:
        # map from payload to its DisjointSetTreeNode
        self.map = {}

    def make_set(self, data):
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data):
        """Return the root node of the set containing ``data`` (compresses paths)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1, node2):
        """Attach the lower-rank root beneath the higher-rank one."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            # Equal ranks: node2 becomes root and its rank grows by one.
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1, data2):
        """Merge the sets containing ``data1`` and ``data2``."""
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with a Kruskal minimum-spanning-tree builder.

    NOTE(review): class and method names are restored to the ones referenced
    inside ``kruskal`` itself (``DisjointSetTree``, ``GraphUndirectedWeighted``);
    the mangled sort key ``lambda A: x[2]`` (parameter/body mismatch) is fixed.
    """

    def __init__(self) -> None:
        # adjacency map: node -> {neighbour: edge weight}
        self.connections = {}

    def add_node(self, node):
        """Add ``node`` if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1, node2, weight):
        """Add an undirected edge; both directions share one weight."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self):
        """Return a new graph holding a minimum spanning tree of ``self``."""
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # Kruskal considers edges in order of increasing weight.
        edges.sort(key=lambda edge: edge[2])
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # Greedily keep edges that connect two different components.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 74 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4): ``SHAaaa(data).hash`` is the hex digest.

    ``data`` must be ``bytes``.  The class keeps the name ``SHAaaa`` because
    the unit test and the CLI below already instantiate it under that name.

    NOTE(review): restored from a mangled version in which every local and
    attribute had collapsed to one repeated name.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A_09_E6_67,
            0xBB_67_AE_85,
            0x3C_6E_F3_72,
            0xA5_4F_F5_3A,
            0x51_0E_52_7F,
            0x9B_05_68_8C,
            0x1F_83_D9_AB,
            0x5B_E0_CD_19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x42_8A_2F_98,
            0x71_37_44_91,
            0xB5_C0_FB_CF,
            0xE9_B5_DB_A5,
            0x39_56_C2_5B,
            0x59_F1_11_F1,
            0x92_3F_82_A4,
            0xAB_1C_5E_D5,
            0xD8_07_AA_98,
            0x12_83_5B_01,
            0x24_31_85_BE,
            0x55_0C_7D_C3,
            0x72_BE_5D_74,
            0x80_DE_B1_FE,
            0x9B_DC_06_A7,
            0xC1_9B_F1_74,
            0xE4_9B_69_C1,
            0xEF_BE_47_86,
            0x0F_C1_9D_C6,
            0x24_0C_A1_CC,
            0x2D_E9_2C_6F,
            0x4A_74_84_AA,
            0x5C_B0_A9_DC,
            0x76_F9_88_DA,
            0x98_3E_51_52,
            0xA8_31_C6_6D,
            0xB0_03_27_C8,
            0xBF_59_7F_C7,
            0xC6_E0_0B_F3,
            0xD5_A7_91_47,
            0x06_CA_63_51,
            0x14_29_29_67,
            0x27_B7_0A_85,
            0x2E_1B_21_38,
            0x4D_2C_6D_FC,
            0x53_38_0D_13,
            0x65_0A_73_54,
            0x76_6A_0A_BB,
            0x81_C2_C9_2E,
            0x92_72_2C_85,
            0xA2_BF_E8_A1,
            0xA8_1A_66_4B,
            0xC2_4B_8B_70,
            0xC7_6C_51_A3,
            0xD1_92_E8_19,
            0xD6_99_06_24,
            0xF4_0E_35_85,
            0x10_6A_A0_70,
            0x19_A4_C1_16,
            0x1E_37_6C_08,
            0x27_48_77_4C,
            0x34_B0_BC_B5,
            0x39_1C_0C_B3,
            0x4E_D8_AA_4A,
            0x5B_9C_CA_4F,
            0x68_2E_6F_F3,
            0x74_8F_82_EE,
            0x78_A5_63_6F,
            0x84_C8_78_14,
            0x8C_C7_02_08,
            0x90_BE_FF_FA,
            0xA4_50_6C_EB,
            0xBE_F9_A3_F7,
            0xC6_71_78_F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a 64-byte multiple: 0x80, zeros, then the 64-bit bit length."""
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the SHA-256 compression function over every 64-byte block."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Message schedule: 16 words from the block plus 48 expanded words.
            words = list(struct.unpack(""">16L""", block))
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Expand the zero-ed words at the end of the schedule.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                big_s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + big_s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                big_s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (big_s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Fold this block's result into the running hash values.
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = """""".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Rotate a 32-bit ``value`` right by ``rotations`` bits."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase__ ( unittest.TestCase ):
    """Cross-checks the pure-Python implementation against ``hashlib``."""

    def test_match_hashes(self):
        # NOTE(review): method renamed so unittest discovery actually runs it;
        # the mangled original also bound the bytes to a throwaway name and
        # called the nonexistent ``hashlib.shaaaa``.
        import hashlib

        data = bytes("""Test String""", """utf-8""")
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def main():
    """CLI entry point: hash a string (``-s``) or a file's bytes (``-f``).

    NOTE(review): renamed to ``main`` — the ``__main__`` guard below already
    calls ``main()`` — and the collapsed locals (parser/args/inputs) are
    restored.
    """
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""",
        """--string""",
        dest="""input_string""",
        default="""Hello World!! Welcome to Cryptography""",
        help="""Hash the string""",
    )
    parser.add_argument(
        """-f""", """--file""", dest="""input_file""", help="""Hash contents of a file"""
    )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, """rb""") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, """utf-8""")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
| 74 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline``.

    NOTE(review): in the mangled original every method shared one name (later
    defs shadowed earlier ones) and ``get_dummy_inputs`` declared two
    parameters named ``A`` (a SyntaxError); the conventional diffusers test
    method/attribute names are restored so the mixins and unittest discovery
    can find them — confirm against the upstream test module.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            # mps generators must live on the CPU default generator.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(),
        reason="""XFormers attention is only available with CUDA and `xformers` installed""",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 74 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are also nth powers.

    Counts ``base ** power`` for ``1 <= base < max_base`` and
    ``1 <= power < max_power`` whose decimal length equals ``power``.

    NOTE(review): restored the name used by the ``__main__`` guard below; the
    mangled original declared two parameters with the same name (a
    SyntaxError) and bound both ranges to one variable.

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74 | 1 |
"""simple docstring"""
import os
import sys
import unittest
_UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
# Paths of the two model test modules exercised below.
# NOTE(review): the mangled original bound both paths to one repeated name,
# losing the bert path; distinct names are restored.
bert_test_file = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
blip_test_file = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class lowerCamelCase__ ( unittest.TestCase ):
    """Tests for the ``utils/get_test_info.py`` mapping helpers.

    NOTE(review): the mangled original passed an undefined name ``A`` to the
    helpers and gave every test method the same name; the two test-file paths
    are computed as class attributes so each test is self-contained, and
    distinct ``test_*`` names are restored so unittest discovers all three.
    """

    bert_test_file = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
    blip_test_file = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(self.bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(self.blip_test_file)
        expected_bert_mapping = {"""BertModelTest""": """BertModelTester"""}
        expected_blip_mapping = {
            """BlipModelTest""": """BlipModelTester""",
            """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
            """BlipTextModelTest""": """BlipTextModelTester""",
            """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
            """BlipVQAModelTest""": """BlipVQAModelTester""",
            """BlipVisionModelTest""": """BlipVisionModelTester""",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(self.bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(self.blip_test_file)
        expected_bert_mapping = {
            """BertForMaskedLM""": ["""BertModelTest"""],
            """BertForMultipleChoice""": ["""BertModelTest"""],
            """BertForNextSentencePrediction""": ["""BertModelTest"""],
            """BertForPreTraining""": ["""BertModelTest"""],
            """BertForQuestionAnswering""": ["""BertModelTest"""],
            """BertForSequenceClassification""": ["""BertModelTest"""],
            """BertForTokenClassification""": ["""BertModelTest"""],
            """BertLMHeadModel""": ["""BertModelTest"""],
            """BertModel""": ["""BertModelTest"""],
        }
        expected_blip_mapping = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
            """BlipModel""": ["""BlipModelTest"""],
            """BlipTextModel""": ["""BlipTextModelTest"""],
            """BlipVisionModel""": ["""BlipVisionModelTest"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(self.bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(self.blip_test_file)
        expected_bert_mapping = {
            """BertForMaskedLM""": ["""BertModelTester"""],
            """BertForMultipleChoice""": ["""BertModelTester"""],
            """BertForNextSentencePrediction""": ["""BertModelTester"""],
            """BertForPreTraining""": ["""BertModelTester"""],
            """BertForQuestionAnswering""": ["""BertModelTester"""],
            """BertForSequenceClassification""": ["""BertModelTester"""],
            """BertForTokenClassification""": ["""BertModelTester"""],
            """BertLMHeadModel""": ["""BertModelTester"""],
            """BertModel""": ["""BertModelTester"""],
        }
        expected_blip_mapping = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
            """BlipModel""": ["""BlipModelTester"""],
            """BlipTextModel""": ["""BlipTextModelTester"""],
            """BlipVisionModel""": ["""BlipVisionModelTester"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 74 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ):
"""simple docstring"""
return len(set(_snake_case ) ) == len(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds tiny ConvNextV2 configs and inputs for the fast model tests.

    NOTE(review): the class name is restored to ``ConvNextVaModelTester``
    because ``setUp`` in the test class below instantiates it under that
    name; the mangled ``__init__`` declared every parameter as ``A`` (a
    SyntaxError) and discarded all attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE(review): mutable defaults mirror the upstream tester style and
        # are never mutated here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            # NOTE(review): the mangled source passed an undefined name here;
            # False is the conventional value — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]
        )
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """Common-suite tests for ConvNextV2 models.

    NOTE(review): this block looks machine-mangled — the class attributes all
    share one name (only the final ``False`` survives), ``A`` is undefined,
    and the nested ``check_hidden_states_output`` below declares three
    parameters all named ``A`` (a SyntaxError that prevents the module from
    parsing).  Code left byte-identical; only comments added.
    """
    # all_model_classes exercised by the common model tests.
    SCREAMING_SNAKE_CASE = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping for pipeline tests.
    SCREAMING_SNAKE_CASE = (
        {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags for the common suite (all collapsed to one mangled name).
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False
    def _UpperCamelCase ( self ):
        # Build the shared model tester and a tiny config tester.
        UpperCAmelCase = ConvNextVaModelTester(self )
        UpperCAmelCase = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
    def _UpperCamelCase ( self ):
        # Standard config serialization/round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _UpperCamelCase ( self ):
        return
    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        # Training smoke test: forward + backward through each model head.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
            UpperCAmelCase = True
            if model_class.__name__ in [
                *get_values(A ),
                *get_values(A ),
            ]:
                continue
            UpperCAmelCase = model_class(A )
            model.to(A )
            model.train()
            UpperCAmelCase = self._prepare_for_class(A ,A ,return_labels=A )
            UpperCAmelCase = model(**A ).loss
            loss.backward()
    def _UpperCamelCase ( self ):
        # Same as above but with gradient checkpointing enabled.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
            UpperCAmelCase = False
            UpperCAmelCase = True
            if (
                model_class.__name__
                in [*get_values(A ), *get_values(A )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            UpperCAmelCase = model_class(A )
            model.to(A )
            model.gradient_checkpointing_enable()
            model.train()
            UpperCAmelCase = self._prepare_for_class(A ,A ,return_labels=A )
            UpperCAmelCase = model(**A ).loss
            loss.backward()
    def _UpperCamelCase ( self ):
        # Every forward signature must start with `pixel_values`.
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(A )
            UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,A )
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def _UpperCamelCase ( self ):
        # Check hidden-state count and spatial shape via both the call
        # argument and the config flag.
        # NOTE(review): the nested def below declares duplicate parameters —
        # this cannot parse; the original presumably took
        # (config, inputs_dict, model_class).
        def check_hidden_states_output(A ,A ,A ):
            UpperCAmelCase = model_class(A )
            model.to(A )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
            UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase = self.model_tester.num_stages
            self.assertEqual(len(A ) ,expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = True
            check_hidden_states_output(A ,A ,A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase = True
            check_hidden_states_output(A ,A ,A )
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )
    @slow
    def _UpperCamelCase ( self ):
        # Smoke-test loading the first published checkpoint.
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = ConvNextVaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def prepare_img():
    """Load the COCO fixture image used by the slow integration test below.

    NOTE(review): the name is restored to ``prepare_img`` — the integration
    test already calls it under that name — and the local is fixed so the
    opened image is actually returned (the mangled version returned an
    undefined ``image``).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test for ConvNextV2 image classification."""

    @cached_property
    def default_image_processor(self):
        # Property name restored: the test below reads ``self.default_image_processor``.
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # NOTE(review): the mangled original bound the model, processor,
        # image and inputs to one repeated name; restored.
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 74 |
"""simple docstring"""
import math
def _a ( _snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( _snake_case = 0.1 ):
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_snake_case )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(_snake_case):
    """Build the ``UperNetConfig`` for a ConvNext UperNet ADE20k checkpoint.

    ``_snake_case`` is the checkpoint name (e.g. ``"upernet-convnext-tiny"``);
    its size tag selects the backbone depths/hidden sizes.

    NOTE(review): function name restored (the conversion routine below calls
    ``get_upernet_config``); the mangled version collapsed ``depths``/
    ``hidden_sizes``/``auxiliary_in_channels`` into one variable, passed the
    model name to ``int()`` when building the label map, and used the mangled
    kwarg names ``idalabel``/``labelaid`` instead of ``id2label``/``label2id``.
    """
    auxiliary_in_channels = 384
    if "tiny" in _snake_case:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in _snake_case:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in _snake_case:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in _snake_case:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in _snake_case:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths,
        hidden_sizes=hidden_sizes,
        out_features=["""stage1""", """stage2""", """stage3""", """stage4"""],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Build the ``(old, new)`` state-dict key pairs for a ConvNext UperNet checkpoint.

    NOTE(review): function name restored (the conversion routine below calls
    ``create_rename_keys``), the parameter renamed to ``config`` (the body
    already reads ``config.backbone_config.depths``), and the accumulator is
    actually named ``rename_keys`` so the ``append`` calls resolve.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    NOTE(review): the function name is restored to ``rename_key`` — the
    conversion routine below already calls it under that name — and the
    mangled signature declared three parameters all named ``_snake_case``
    (a SyntaxError).
    """
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet+ConvNeXt checkpoint to the HF format.

    Downloads the original checkpoint for ``model_name``, remaps its state-dict
    keys to the Transformers naming scheme, sanity-checks the logits on a
    fixture image, and optionally saves the converted model/processor to
    ``pytorch_dump_folder_path`` and/or pushes them to the hub.

    NOTE(review): local names in this file were machine-scrambled (every local
    was `UpperCAmelCase`); they are restored here from the visible call sites.
    """
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify logits on a fixture image from the ADE20k dataset
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    # Reference slices recorded from the original mmsegmentation checkpoints.
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # CLI entry point for the checkpoint converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( ProcessorMixin ):
    """Bundle a CLIP image processor and a CLIP tokenizer into a single processor.

    NOTE(review): the base class was the unresolved name `snake_case`; restored
    to the `ProcessorMixin` imported at the top of this section. Parameter and
    attribute names were scrambled (duplicate `A` arguments are a SyntaxError)
    and are restored from the surviving attribute accesses below.
    """

    # Attributes managed by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        # Legacy keyword: accept `feature_extractor` but warn about removal.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the pixel values into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer; see its documentation for details.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer; see its documentation for details.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-components' input names, de-duplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 74 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework for the tokenizer tests below (read as FRAMEWORK
# by the ByT5 test class). NOTE(review): the scrambled version assigned
# `_UpperCamelCase`, leaving `FRAMEWORK` undefined at its use site.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
# NOTE(review): identifiers in this class were machine-scrambled: the base
# `snake_case` is unresolved (presumably the imported TokenizerTesterMixin —
# confirm against upstream), both class attributes share one name, every method
# is called `_UpperCamelCase` (later defs shadow earlier ones), and several
# signatures repeat the parameter `A`, which is a SyntaxError. Locals that the
# code later reads (`tokenizer`, `toks`, `batch`, ...) were all renamed to
# `UpperCAmelCase`, so most reads reference unbound names. Flagged for
# restoration from the upstream ByT5 tokenizer test suite; code left byte-identical here.
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
    SCREAMING_SNAKE_CASE = ByTaTokenizer
    SCREAMING_SNAKE_CASE = False
    # setUp: build a fresh ByT5 tokenizer and save it to the temp dir.
    def _UpperCamelCase ( self ):
        super().setUp()
        UpperCAmelCase = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _UpperCamelCase ( self ):
        return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
    def _UpperCamelCase ( self ,**A ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**A )
    # Builds a clean (decodable) input string + ids pair for the common tests.
    def _UpperCamelCase ( self ,A ,A=False ,A=20 ,A=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        UpperCAmelCase = []
        for i in range(len(A ) ):
            try:
                UpperCAmelCase = tokenizer.decode([i] ,clean_up_tokenization_spaces=A )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        UpperCAmelCase = list(filter(lambda A : re.match(r"""^[ a-zA-Z]+$""" ,t[1] ) ,A ) )
        UpperCAmelCase = list(filter(lambda A : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=A ) ,A ) )
        if max_length is not None and len(A ) > max_length:
            UpperCAmelCase = toks[:max_length]
        if min_length is not None and len(A ) < min_length and len(A ) > 0:
            while len(A ) < min_length:
                UpperCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase = tokenizer.decode(A ,clean_up_tokenization_spaces=A )
        if " " not in output_txt and len(A ) > 1:
            UpperCAmelCase = (
                tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=A )
                + """ """
                + tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=A )
            )
        if with_prefix_space:
            UpperCAmelCase = """ """ + output_txt
        UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
        return output_txt, output_ids
    # Explicit </s> in the input should match the automatically appended EOS.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.ta_base_tokenizer
        UpperCAmelCase = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
        UpperCAmelCase = tokenizer(["""hi""", """I went to the gym""", """"""] )
        self.assertListEqual(batch_with_eos_added["""input_ids"""] ,batch_without_eos_added["""input_ids"""] )
    # Round-trips multibyte (non-ASCII) characters through encode/decode.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.ta_base_tokenizer
        UpperCAmelCase = """Unicode €."""
        UpperCAmelCase = tokenizer(A )
        UpperCAmelCase = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["""input_ids"""] ,A )
        # decoding
        UpperCAmelCase = tokenizer.decode(A )
        self.assertEqual(A ,"""Unicode €.</s>""" )
        UpperCAmelCase = tokenizer("""e è é ê ë""" )
        UpperCAmelCase = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["""input_ids"""] ,A )
        # decoding
        UpperCAmelCase = tokenizer.decode(A )
        self.assertEqual(A ,"""e è é ê ë</s>""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) ,"""e è é ê ë</s>""" )
    # Batch tokenization returns framework tensors of the padded shape (2, 37).
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.ta_base_tokenizer
        UpperCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        UpperCAmelCase = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        UpperCAmelCase = tokenizer(A ,padding=A ,return_tensors=A )
        self.assertIsInstance(A ,A )
        if FRAMEWORK != "jax":
            UpperCAmelCase = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(A ,A )
        self.assertEqual((2, 37) ,batch.input_ids.shape )
        self.assertEqual((2, 37) ,batch.attention_mask.shape )
    # Encoder-only inputs: no decoder_* keys should be produced.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.ta_base_tokenizer
        UpperCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        UpperCAmelCase = tokenizer(A ,padding=A ,return_tensors=A )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""" ,A )
        self.assertIn("""attention_mask""" ,A )
        self.assertNotIn("""decoder_input_ids""" ,A )
        self.assertNotIn("""decoder_attention_mask""" ,A )
    # Targets padded/truncated to max_length=32.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.ta_base_tokenizer
        UpperCAmelCase = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        UpperCAmelCase = tokenizer(
            text_target=A ,max_length=32 ,padding="""max_length""" ,truncation=A ,return_tensors=A )
        self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
    # Jointly encoding inputs and targets yields input_ids and labels.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.ta_base_tokenizer
        UpperCAmelCase = ["""A long paragraph for summarization. </s>"""]
        UpperCAmelCase = ["""Summary of the text. </s>"""]
        # fmt: off
        UpperCAmelCase = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        UpperCAmelCase = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        UpperCAmelCase = tokenizer(A ,text_target=A )
        self.assertEqual(A ,batch["""input_ids"""][0] )
        self.assertEqual(A ,batch["""labels"""][0] )
    # Save/reload round-trip, including added and special tokens.
    def _UpperCamelCase ( self ):
        # safety check on max_len default value so we are sure the test works
        UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length ,42 )
        # Now let's start the test
        UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase = tempfile.mkdtemp()
                UpperCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
                UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
                tokenizer.save_pretrained(A )
                UpperCAmelCase = tokenizer.__class__.from_pretrained(A )
                UpperCAmelCase = after_tokenizer.encode(A ,add_special_tokens=A )
                self.assertListEqual(A ,A )
                shutil.rmtree(A )
        UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase = tempfile.mkdtemp()
                UpperCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                UpperCAmelCase = tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
                tokenizer.save_pretrained(A )
                UpperCAmelCase = tokenizer.__class__.from_pretrained(A )
                UpperCAmelCase = after_tokenizer.encode(A ,add_special_tokens=A )
                self.assertListEqual(A ,A )
                self.assertIn("""new_additional_special_token""" ,after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length ,42 )
                UpperCAmelCase = tokenizer.__class__.from_pretrained(A ,model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length ,43 )
                shutil.rmtree(A )
    # additional_special_tokens must survive editing the saved JSON config files.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(A )
                with open(os.path.join(A ,"""special_tokens_map.json""" ) ,encoding="""utf-8""" ) as json_file:
                    UpperCAmelCase = json.load(A )
                with open(os.path.join(A ,"""tokenizer_config.json""" ) ,encoding="""utf-8""" ) as json_file:
                    UpperCAmelCase = json.load(A )
                UpperCAmelCase = [F'''<extra_id_{i}>''' for i in range(125 )]
                UpperCAmelCase = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                UpperCAmelCase = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(A ,"""special_tokens_map.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile:
                    json.dump(A ,A )
                with open(os.path.join(A ,"""tokenizer_config.json""" ) ,"""w""" ,encoding="""utf-8""" ) as outfile:
                    json.dump(A ,A )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase = tokenizer_class.from_pretrained(
                    A ,)
                self.assertIn(
                    """an_additional_special_token""" ,tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["""an_additional_special_token"""] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) ,)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" ,lstrip=A )]
                UpperCAmelCase = tokenizer_class.from_pretrained(
                    A ,additional_special_tokens=A ,)
                self.assertIn("""a_new_additional_special_token""" ,tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""] ,tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) ,)
    # Decoding token id 255 yields the empty string after a save/load round-trip.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(A )
                UpperCAmelCase = tokenizer_class.from_pretrained(A )
                self.assertTrue(tokenizer.decode([255] ) == """""" )
    # The next four common-suite tests are intentionally disabled for ByT5.
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        UpperCAmelCase = self.get_tokenizers(fast=A ,do_lower_case=A )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                UpperCAmelCase = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
                UpperCAmelCase = tokenizer.convert_tokens_to_string(A )
                self.assertIsInstance(A ,A )
    # Exercises the special-token attribute setters (bos_token, eos_token, ...).
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                UpperCAmelCase = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                UpperCAmelCase = 0
                UpperCAmelCase = tokenizer.convert_ids_to_tokens(
                    A ,skip_special_tokens=A )
                for attr in attributes_list:
                    setattr(A ,attr + """_id""" ,A )
                    self.assertEqual(getattr(A ,A ) ,A )
                    self.assertEqual(getattr(A ,attr + """_id""" ) ,A )
                    setattr(A ,attr + """_id""" ,A )
                    self.assertEqual(getattr(A ,A ) ,A )
                    self.assertEqual(getattr(A ,attr + """_id""" ) ,A )
                setattr(A ,"""additional_special_tokens_ids""" ,[] )
                self.assertListEqual(getattr(A ,"""additional_special_tokens""" ) ,[] )
                self.assertListEqual(getattr(A ,"""additional_special_tokens_ids""" ) ,[] )
                setattr(A ,"""additional_special_tokens_ids""" ,[token_id_to_test_setters] )
                self.assertListEqual(getattr(A ,"""additional_special_tokens""" ) ,[token_to_test_setters] )
                self.assertListEqual(getattr(A ,"""additional_special_tokens_ids""" ) ,[token_id_to_test_setters] )
| 74 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
# Base search URL; the location is appended to the `l=` query parameter.
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai"):
    """Yield ``(job_title, company_name)`` pairs scraped from Indeed for *location*.

    Performs a live HTTP request; results depend on the site's current markup.
    """
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 74 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE(review): these six classes are import-guard placeholders: instantiating
# any of them (or calling either classmethod) raises via `requires_backends`
# because the real implementation needs torch + transformers + onnx. The
# metaclass name `snake_case` is unresolved here (presumably the `DummyObject`
# imported above — confirm against upstream), and all six classes share the
# scrambled name `lowerCamelCase__`, so each definition shadows the previous
# one; the original distinct pipeline names were lost and are not guessed here.
class lowerCamelCase__ ( metaclass=snake_case ):
    # Backends required by the real class this placeholder stands in for.
    SCREAMING_SNAKE_CASE = ['''torch''', '''transformers''', '''onnx''']
    def __init__( self ,*A ,**A ):
        requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase__ ( metaclass=snake_case ):
    SCREAMING_SNAKE_CASE = ['''torch''', '''transformers''', '''onnx''']
    def __init__( self ,*A ,**A ):
        requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase__ ( metaclass=snake_case ):
    SCREAMING_SNAKE_CASE = ['''torch''', '''transformers''', '''onnx''']
    def __init__( self ,*A ,**A ):
        requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase__ ( metaclass=snake_case ):
    SCREAMING_SNAKE_CASE = ['''torch''', '''transformers''', '''onnx''']
    def __init__( self ,*A ,**A ):
        requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase__ ( metaclass=snake_case ):
    SCREAMING_SNAKE_CASE = ['''torch''', '''transformers''', '''onnx''']
    def __init__( self ,*A ,**A ):
        requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase__ ( metaclass=snake_case ):
    SCREAMING_SNAKE_CASE = ['''torch''', '''transformers''', '''onnx''']
    def __init__( self ,*A ,**A ):
        requires_backends(self ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def _UpperCamelCase ( cls ,*A ,**A ):
        requires_backends(cls ,["""torch""", """transformers""", """onnx"""] )
| 74 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
# NOTE(review): identifiers scrambled — all three methods share the name
# `_UpperCamelCase` (later defs shadow earlier ones) and tuple-unpack targets
# are both `UpperCAmelCase`, so subsequent assertions reference the wrong
# values. Flagged for restoration against the upstream backbone_utils tests;
# code left byte-identical here.
class lowerCamelCase__ ( unittest.TestCase ):
    # Verifies get_aligned_output_features_output_indices default/alignment rules.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A )
        self.assertEqual(A ,["""c"""] )
        self.assertEqual(A ,[2] )
        # Out indices set to match out features
        UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A )
        self.assertEqual(A ,["""a""", """c"""] )
        self.assertEqual(A ,[0, 2] )
        # Out features set to match out indices
        UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A )
        self.assertEqual(A ,["""a""", """c"""] )
        self.assertEqual(A ,[0, 2] )
        # Out features selected from negative indices
        UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A )
        self.assertEqual(A ,["""a""", """c"""] )
        self.assertEqual(A ,[-3, -1] )
    # Verifies every validation error raised by verify_out_features_out_indices.
    def _UpperCamelCase ( self ):
        # Stage names must be set
        with self.assertRaises(A ):
            verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A )
        # Out features must be a list
        with self.assertRaises(A ):
            verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] )
        # Out features must be a subset of stage names
        with self.assertRaises(A ):
            verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] )
        # Out indices must be a list or tuple
        with self.assertRaises(A ):
            verify_out_features_out_indices(A ,0 ,["""a""", """b"""] )
        # Out indices must be a subset of stage names
        with self.assertRaises(A ):
            verify_out_features_out_indices(A ,(0, 1) ,["""a"""] )
        # Out features and out indices must be the same length
        with self.assertRaises(A ):
            verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] )
        # Out features should match out indices
        with self.assertRaises(A ):
            verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] )
        # Out features and out indices should be in order
        with self.assertRaises(A ):
            verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] )
    # Verifies BackboneMixin keeps out_features/out_indices in sync when updated.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = BackboneMixin()
        UpperCAmelCase = ["""a""", """b""", """c"""]
        UpperCAmelCase = ["""a""", """c"""]
        UpperCAmelCase = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features ,["""a""", """c"""] )
        self.assertEqual(backbone.out_indices ,[0, 2] )
        # Check out features and indices are updated correctly
        UpperCAmelCase = ["""a""", """b"""]
        self.assertEqual(backbone.out_features ,["""a""", """b"""] )
        self.assertEqual(backbone.out_indices ,[0, 1] )
        UpperCAmelCase = [-3, -1]
        self.assertEqual(backbone.out_features ,["""a""", """c"""] )
        self.assertEqual(backbone.out_indices ,[-3, -1] )
| 74 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Configuration holder used by the MobileNetV1 image-processing tests below.

    NOTE(review): the scrambled version declared every __init__ parameter as
    `A` (a SyntaxError) and was named `lowerCamelCase__`; names are restored
    from the call sites (`MobileNetVaImageProcessingTester(self)` and
    `prepare_image_processor_dict()`) and the assignment order.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fall back to the default resize/crop geometry when not supplied.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
# NOTE(review): scrambled identifiers — the base `snake_case` is unresolved
# (presumably the ImageProcessingSavingTestMixin imported above — confirm),
# every method shares the name `_UpperCamelCase` (later defs shadow earlier
# ones), and locals read below (`image_processor`, `image_inputs`,
# `encoded_images`) were all renamed to `UpperCAmelCase`. Flagged for
# restoration from the upstream MobileNetV1 image-processing tests; code
# left byte-identical here.
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
    SCREAMING_SNAKE_CASE = MobileNetVaImageProcessor if is_vision_available() else None
    # setUp: attach the config helper defined above.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = MobileNetVaImageProcessingTester(self )
    @property
    def _UpperCamelCase ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor exposes the expected configuration attributes.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A ,"""do_resize""" ) )
        self.assertTrue(hasattr(A ,"""size""" ) )
        self.assertTrue(hasattr(A ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(A ,"""crop_size""" ) )
    # from_dict honors both the stored config and keyword overrides.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
    def _UpperCamelCase ( self ):
        pass
    # PIL input: single image and batch produce the expected 4D tensor shapes.
    def _UpperCamelCase ( self ):
        # Initialize image_processing
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A ,Image.Image )
        # Test not batched input
        UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
    # numpy input: same shape expectations as the PIL case.
    def _UpperCamelCase ( self ):
        # Initialize image_processing
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,np.ndarray )
        # Test not batched input
        UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
    # torch input: same shape expectations as the PIL case.
    def _UpperCamelCase ( self ):
        # Initialize image_processing
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A ,torch.Tensor )
        # Test not batched input
        UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
| 74 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCamelCase__ :
    """A circular FIFO queue backed by a fixed-size ring of doubly linked Node objects.

    A slot whose ``data`` is None is considered empty; ``front`` points at the
    oldest element and ``rear`` at the newest. NOTE(review): method and local
    names were scrambled; they are restored from the surviving internal call
    sites (``self.create_linked_list``, ``self.is_empty``, ...).
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        # front/rear are set by create_linked_list; declared here for clarity.
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Build a ring of ``initial_capacity`` empty nodes and point front/rear at it."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: last node links back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        # Empty iff the single active slot (front == rear) carries no data.
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Return the oldest element without removing it; raises on an empty queue."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store ``data`` in the next free slot; raises when the ring is full."""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            # Advance rear only when the queue already holds at least one item.
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the oldest element; raises on an empty queue."""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single-element case: clear the slot but do not move the pointers.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        # Full when rear has wrapped around to sit just behind front.
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")
class lowerCamelCase__ :
    """Linked-list node remnant.

    NOTE(review): this redefines ``lowerCamelCase__`` (shadowing the queue
    class above) and its three attribute assignments were collapsed into a
    throwaway local — presumably ``self.data = None``, ``self.next = None``,
    ``self.prev = None`` in the original ``Node`` class. Confirm and restore.
    """

    def __init__( self ):
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 74 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
    """Configuration holder for ImageGPT image-processor tests.

    NOTE(review): ``__init__`` declares the parameter ``A`` nine times —
    duplicate parameter names are a SyntaxError in Python, so this class as
    written does not even compile. The bodies reference the original
    parameter names (``parent``, ``batch_size``, ...) and assign to a
    mangled local instead of ``self.<attr>``; the ``self.do_resize`` etc.
    read back in ``prepare_image_processor_dict`` are therefore never set.
    """

    def __init__( self ,A ,A=7 ,A=3 ,A=18 ,A=30 ,A=400 ,A=True ,A=None ,A=True ,):
        UpperCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = image_size
        UpperCAmelCase = min_resolution
        UpperCAmelCase = max_resolution
        UpperCAmelCase = do_resize
        UpperCAmelCase = size
        UpperCAmelCase = do_normalize

    # prepare_image_processor_dict: kwargs for constructing the processor.
    def _UpperCamelCase ( self ):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
    """Unit tests for ImageGPTImageProcessor (properties, dict/json/pretrained round-trips).

    NOTE(review): every test method is named ``_UpperCamelCase`` (later defs
    shadow earlier ones), the base ``snake_case`` is not a defined name, and
    ``A`` / ``image_processor`` / ``obj`` / ``image_processor_first`` are
    mangled placeholders for the values bound to ``UpperCAmelCase``. The
    intent of each method is annotated below; restore names before running.
    """

    SCREAMING_SNAKE_CASE = ImageGPTImageProcessor if is_vision_available() else None

    # setUp: build the shared tester fixture.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = ImageGPTImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ):
        # image_processor_dict property: delegate to the tester fixture.
        return self.image_processor_tester.prepare_image_processor_dict()

    # test that the processor exposes the expected attributes.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A ,"""clusters""" ) )
        self.assertTrue(hasattr(A ,"""do_resize""" ) )
        self.assertTrue(hasattr(A ,"""size""" ) )
        self.assertTrue(hasattr(A ,"""do_normalize""" ) )

    # test from_dict, with and without a `size` override.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
        UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )

    # test to_json_string round-trip (clusters compared as numpy arrays).
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        UpperCAmelCase = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(A ,obj[key] ) )
            else:
                self.assertEqual(obj[key] ,A )

    # test to_json_file / from_json_file round-trip in a temp directory.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCAmelCase = os.path.join(A ,"""image_processor.json""" )
            image_processor_first.to_json_file(A )
            UpperCAmelCase = self.image_processing_class.from_json_file(A ).to_dict()
            UpperCAmelCase = image_processor_first.to_dict()
            for key, value in image_processor_first.items():
                if key == "clusters":
                    self.assertTrue(np.array_equal(A ,image_processor_second[key] ) )
                else:
                    self.assertEqual(image_processor_first[key] ,A )

    # test save_pretrained / from_pretrained round-trip in a temp directory.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(A )
            UpperCAmelCase = self.image_processing_class.from_pretrained(A ).to_dict()
            UpperCAmelCase = image_processor_first.to_dict()
            for key, value in image_processor_first.items():
                if key == "clusters":
                    self.assertTrue(np.array_equal(A ,image_processor_second[key] ) )
                else:
                    self.assertEqual(image_processor_first[key] ,A )

    @unittest.skip("""ImageGPT requires clusters at initialization""" )
    def _UpperCamelCase ( self ):
        # init-without-clusters test is intentionally skipped for ImageGPT.
        pass
def _a ( ):
    """Load two sample images from the image-utils fixtures dataset.

    Returns:
        list: two ``PIL.Image.Image`` objects (indices 4 and 5 of the
        ``hf-internal-testing/fixtures_image_utils`` test split), used by the
        slow integration test below.

    Bug fix: the original bound the dataset to a throwaway local and then
    read the undefined names ``dataset`` and ``imagea`` (NameError at the
    first call). The intermediate results are now bound to the names the
    following lines actually read.
    """
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image_a = Image.open(dataset[4]["""file"""] )
    image_b = Image.open(dataset[5]["""file"""] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test: pin ImageGPT token ids for two fixture images.

    NOTE(review): ``prepare_images`` is not defined under that name in this
    module (the helper above was renamed ``_a``), and ``image_processing`` /
    ``encoding`` / ``A`` are mangled placeholders for the values bound to
    ``UpperCAmelCase`` — presumably the processor, its output, and the
    expected id lists respectively. Restore names before running.
    """

    @slow
    def _UpperCamelCase ( self ):
        UpperCAmelCase = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        UpperCAmelCase = prepare_images()
        # test non-batched: one image -> (1, 1024) LongTensor of cluster ids.
        UpperCAmelCase = image_processing(images[0] ,return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape ,(1, 1_024) )
        UpperCAmelCase = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() ,A )
        # test batched: two images -> (2, 1024), checked on the trailing ids.
        UpperCAmelCase = image_processing(A ,return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape ,(2, 1_024) )
        UpperCAmelCase = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A )
| 74 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
def _a ( _snake_case , _snake_case , _snake_case ):
    """Copy HiFi-GAN generator weights from an original checkpoint into a HF model.

    NOTE(review): the signature declares ``_snake_case`` three times —
    duplicate parameter names are a SyntaxError, so this function does not
    compile as written (originally presumably ``checkpoint, hf_model,
    config``). Every weight read below is bound to a mangled local instead
    of the corresponding ``hf_model`` parameter attribute, so even with the
    signature repaired the copied tensors would be discarded. Restore the
    original assignment targets before use.
    """
    # Weight norm must be applied so weight_g/weight_v parameters exist.
    hf_model.apply_weight_norm()
    UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
    UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
    UpperCAmelCase = checkpoint["""input_conv.bias"""]
    # One transposed-conv upsampler per configured upsample rate.
    for i in range(len(config.upsample_rates ) ):
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']
    # Residual blocks: upsample_rates x resblock_kernel_sizes blocks, each
    # with convs1/convs2 per dilation.
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
    UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
    UpperCAmelCase = checkpoint["""output_conv.1.bias"""]
    # Fold weight_g/weight_v back into plain weights after copying.
    hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ):
    """Convert an original SpeechT5 HiFi-GAN checkpoint to HF format and optionally push it.

    NOTE(review): the signature repeats ``_snake_case`` five times —
    duplicate parameter names are a SyntaxError, so this does not compile
    (originally presumably ``checkpoint_path, stats_path,
    pytorch_dump_folder_path, config_path, repo_id``). The stats mean/scale
    tensors below are bound to a mangled local rather than ``model.mean`` /
    ``model.scale``, so they would be lost even if the signature were fixed.
    """
    if config_path is not None:
        UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
    else:
        UpperCAmelCase = SpeechTaHifiGanConfig()
    UpperCAmelCase = SpeechTaHifiGan(_snake_case )
    UpperCAmelCase = torch.load(_snake_case )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )
    # stats.npy holds the (mean, scale) normalization statistics.
    UpperCAmelCase = np.load(_snake_case )
    UpperCAmelCase = stats[0].reshape(-1 )
    UpperCAmelCase = stats[1].reshape(-1 )
    UpperCAmelCase = torch.from_numpy(_snake_case ).float()
    UpperCAmelCase = torch.from_numpy(_snake_case ).float()
    model.save_pretrained(_snake_case )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(_snake_case )
# CLI entry point for the HiFi-GAN checkpoint conversion.
# NOTE(review): the parser/args bindings were mangled to `_UpperCamelCase`,
# so `parser` and `args` are undefined, and `convert_hifigan_checkpoint`
# does not exist under that name (the function above was renamed `_a`).
if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    _UpperCamelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
_UpperCamelCase = tuple[int, int, int]
_UpperCamelCase = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_UpperCamelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
_UpperCamelCase = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
_UpperCamelCase = """FOBHMDKEXQNRAULPGSJVTYICZW"""
_UpperCamelCase = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
_UpperCamelCase = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
_UpperCamelCase = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
_UpperCamelCase = """SGLCPQWZHKXAREONTFBVIYJUDM"""
_UpperCamelCase = """HVSICLTYKQUBXDWAJZOMFGPREN"""
_UpperCamelCase = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
_UpperCamelCase = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
_UpperCamelCase = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _a ( _snake_case , _snake_case , _snake_case ):
    """Validate the rotor selection, rotor positions and plugboard string.

    NOTE(review): the signature declares ``_snake_case`` three times —
    duplicate parameter names are a SyntaxError, so this does not compile
    (originally presumably ``rotpos, rotsel, pb``). The bodies still read
    the original names (``rotpos``) and the error messages are bound to a
    mangled local before being raised via the wrong name. Restore the
    original identifiers before use.
    """
    # Require three distinct rotors.
    if (unique_rotsel := len(set(_snake_case ) )) < 3:
        UpperCAmelCase = F'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(_snake_case )
    # Checks if rotor positions are valid
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rotpos
    if not 0 < rotorposa <= len(_snake_case ):
        UpperCAmelCase = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
        raise ValueError(_snake_case )
    if not 0 < rotorposa <= len(_snake_case ):
        UpperCAmelCase = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
        raise ValueError(_snake_case )
    if not 0 < rotorposa <= len(_snake_case ):
        UpperCAmelCase = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
        raise ValueError(_snake_case )
    # Validates string and returns dict
    UpperCAmelCase = _plugboard(_snake_case )
    return rotpos, rotsel, pbdict
def _a ( _snake_case ):
    """Build the plugboard dict from a string of letter pairs (e.g. "AB CD").

    NOTE(review): variable names inside were mangled — ``pbstring`` (read
    below) was presumably the parameter now called ``_snake_case``, ``abc``
    is the module alphabet that no longer exists under that name, and the
    messages/sets bound to the throwaway local are then used via their
    original names. Also note ``pbstring.replace(" ", "")`` discards its
    result (str.replace is not in-place) — likely an original bug too.
    """
    if not isinstance(_snake_case , _snake_case ):
        UpperCAmelCase = F'''Plugboard setting isn\'t type string ({type(_snake_case )})'''
        raise TypeError(_snake_case )
    elif len(_snake_case ) % 2 != 0:
        UpperCAmelCase = F'''Odd number of symbols ({len(_snake_case )})'''
        raise Exception(_snake_case )
    elif pbstring == "":
        return {}
    pbstring.replace(""" """ , """""" )
    # Checks if all characters are unique
    UpperCAmelCase = set()
    for i in pbstring:
        if i not in abc:
            UpperCAmelCase = F'''\'{i}\' not in list of symbols'''
            raise Exception(_snake_case )
        elif i in tmppbl:
            UpperCAmelCase = F'''Duplicate symbol ({i})'''
            raise Exception(_snake_case )
        else:
            tmppbl.add(_snake_case )
    del tmppbl
    # Created the dictionary: each pair maps both ways (A->B and B->A).
    UpperCAmelCase = {}
    for j in range(0 , len(_snake_case ) - 1 , 2 ):
        UpperCAmelCase = pbstring[j + 1]
        UpperCAmelCase = pbstring[j]
    return pb
def _a ( _snake_case , _snake_case , _snake_case = (rotora, rotora, rotora) , _snake_case = "" , ):
    """Encrypt/decrypt text with the emulated Enigma (same op both ways).

    NOTE(review): the signature repeats ``_snake_case`` four times —
    duplicate parameter names are a SyntaxError, so this does not compile
    (originally presumably ``text, rotor_position, rotor_selection,
    plugb``); the default also references ``rotora``, which is undefined in
    this module. The body still reads the original names (``text``,
    ``rotor_position``, ``plugboard``, ``abc``, ``reflector``...) while all
    intermediate results are bound to a mangled local. Restore the original
    identifiers before use.
    """
    UpperCAmelCase = text.upper()
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = _validator(
        _snake_case , _snake_case , plugb.upper() )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rotor_position
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rotor_selection
    # Convert 1-based rotor positions to 0-based indices.
    rotorposa -= 1
    rotorposa -= 1
    rotorposa -= 1
    UpperCAmelCase = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                UpperCAmelCase = plugboard[symbol]
            # rotor ra --------------------------
            UpperCAmelCase = abc.index(_snake_case ) + rotorposa
            UpperCAmelCase = rotora[index % len(_snake_case )]
            # rotor rb --------------------------
            UpperCAmelCase = abc.index(_snake_case ) + rotorposa
            UpperCAmelCase = rotora[index % len(_snake_case )]
            # rotor rc --------------------------
            UpperCAmelCase = abc.index(_snake_case ) + rotorposa
            UpperCAmelCase = rotora[index % len(_snake_case )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            UpperCAmelCase = reflector[symbol]
            # 2nd rotors
            UpperCAmelCase = abc[rotora.index(_snake_case ) - rotorposa]
            UpperCAmelCase = abc[rotora.index(_snake_case ) - rotorposa]
            UpperCAmelCase = abc[rotora.index(_snake_case ) - rotorposa]
            # 2nd plugboard
            if symbol in plugboard:
                UpperCAmelCase = plugboard[symbol]
            # moves/resets rotor positions
            rotorposa += 1
            if rotorposa >= len(_snake_case ):
                UpperCAmelCase = 0
            rotorposa += 1
            if rotorposa >= len(_snake_case ):
                UpperCAmelCase = 0
            rotorposa += 1
            if rotorposa >= len(_snake_case ):
                UpperCAmelCase = 0
        # else:
        # pass
        # Error could be also raised
        # raise ValueError(
        # 'Invalid symbol('+repr(symbol)+')')
        result.append(_snake_case )
    return "".join(_snake_case )
# Demo: encrypt a message and show that running enigma again decrypts it.
# NOTE(review): every binding was mangled to `_UpperCamelCase`, so the names
# read below (`rotora`, `message`, `rotor_pos`, `rotor_sel`, `pb`, `en`) and
# the function `enigma` (renamed `_a` above) are all undefined as written.
if __name__ == "__main__":
    _UpperCamelCase = """This is my Python script that emulates the Enigma machine from WWII."""
    _UpperCamelCase = (1, 1, 1)
    _UpperCamelCase = """pictures"""
    _UpperCamelCase = (rotora, rotora, rotora)
    _UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb)
    print("""Encrypted message:""", en)
    print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 74 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Deprecated backward-compatibility aliases for moved/renamed objects.
# NOTE(review): all seven aliases were mangled to the single name
# `_UpperCamelCase`, so only the last binding survives — the originals were
# presumably distinct deprecated names (e.g. `concatenate_datasets`,
# `DownloadConfig`, `DownloadManager`, `DownloadMode` re-exports).
_UpperCamelCase = concatenate_datasets
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadManager
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadConfig
_UpperCamelCase = DownloadMode
_UpperCamelCase = DownloadManager

# The helper modules imported above for the deprecated aliases are not part
# of the public namespace.
del _arrow_dataset, _utils, _deprecated_download_manager
| 74 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
# Module logger (bound to a mangled name; presumably `logger` originally).
_UpperCamelCase = datasets.utils.logging.get_logger(__name__)


class lowerCamelCase__ ( folder_based_builder.FolderBasedBuilderConfig ):
    """BuilderConfig for AudioFolder.

    NOTE(review): both class attributes share the mangled name
    ``SCREAMING_SNAKE_CASE`` so only the second binding survives — the
    originals were presumably the distinct ``drop_labels`` /
    ``drop_metadata`` options.
    """

    SCREAMING_SNAKE_CASE = None
    SCREAMING_SNAKE_CASE = None
class lowerCamelCase__ ( folder_based_builder.FolderBasedBuilder ):
    """Folder-based builder for audio classification datasets.

    NOTE(review): all five class attributes were mangled to the single name
    ``SCREAMING_SNAKE_CASE`` (only the last binding survives); they were
    presumably ``BASE_FEATURE``, ``BASE_COLUMN_NAME``, ``BUILDER_CONFIG_CLASS``,
    ``EXTENSIONS`` and ``CLASSIFICATION_TASK``. ``AudioFolderConfig`` is also
    undefined under that name (the config class above was renamed).
    """

    SCREAMING_SNAKE_CASE = datasets.Audio()
    SCREAMING_SNAKE_CASE = '''audio'''
    SCREAMING_SNAKE_CASE = AudioFolderConfig
    SCREAMING_SNAKE_CASE = 42  # definition at the bottom of the script
    SCREAMING_SNAKE_CASE = AudioClassification(audio_column='''audio''' , label_column='''label''' )
# File extensions the audio decoder can read (soundfile-supported formats
# plus mp3/opus).
_UpperCamelCase = [
    """.aiff""",
    """.au""",
    """.avr""",
    """.caf""",
    """.flac""",
    """.htk""",
    """.svx""",
    """.mat4""",
    """.mat5""",
    """.mpc2k""",
    """.ogg""",
    """.paf""",
    """.pvf""",
    """.raw""",
    """.rf64""",
    """.sd2""",
    """.sds""",
    """.ircam""",
    """.voc""",
    """.w64""",
    """.wav""",
    """.nist""",
    """.wavex""",
    """.wve""",
    """.xi""",
    """.mp3""",
    """.opus""",
]
# NOTE(review): the list above was presumably named `AUDIO_EXTENSIONS`; with
# both bindings mangled to `_UpperCamelCase`, the name read here is undefined.
_UpperCamelCase = AUDIO_EXTENSIONS
| 74 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 74 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( _snake_case ):
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( _snake_case ):
    """Distributed `gather` smoke test.

    NOTE(review): mangled — ``create_tensor``/``state``/``gathered_tensor``
    are undefined here (every function in this file was renamed ``_a`` and
    the locals were collapsed); ``gather(_snake_case )`` also passes the
    state instead of the created tensor. Restore names before running.
    """
    UpperCAmelCase = create_tensor(_snake_case )
    UpperCAmelCase = gather(_snake_case )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( _snake_case ):
    """Distributed `gather_object` smoke test.

    NOTE(review): mangled — ``state`` and ``gathered_obj`` are undefined
    (the parameter is ``_snake_case`` and the gathered result was bound to a
    throwaway local). Restore names before running.
    """
    UpperCAmelCase = [state.process_index]
    UpperCAmelCase = gather_object(_snake_case )
    assert len(_snake_case ) == state.num_processes, F'''{gathered_obj}, {len(_snake_case )} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( _snake_case ):
    """Distributed `broadcast` smoke test.

    NOTE(review): mangled — ``create_tensor``/``state``/``broadcasted_tensor``
    are undefined as written, and ``broadcast(_snake_case )`` passes the
    state rather than the created tensor. Restore names before running.
    """
    UpperCAmelCase = create_tensor(_snake_case )
    UpperCAmelCase = broadcast(_snake_case )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( _snake_case ):
    """`pad_across_processes` smoke test: non-main ranks are padded by one zero.

    NOTE(review): mangled — ``state`` and ``padded_tensor`` are undefined
    (the parameter is ``_snake_case``, results bound to a throwaway local).
    Restore names before running.
    """
    if state.is_main_process:
        UpperCAmelCase = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        UpperCAmelCase = torch.arange(state.num_processes ).to(state.device )
    UpperCAmelCase = pad_across_processes(_snake_case )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( _snake_case ):
    """`reduce(..., "sum")` smoke test; only meaningful with exactly 2 processes.

    NOTE(review): mangled — ``state``, ``reduced_tensor`` and
    ``truth_tensor`` are undefined as written, and the final ``allclose``
    compares the state parameter with itself. Restore names before running.
    """
    if state.num_processes != 2:
        return
    UpperCAmelCase = create_tensor(_snake_case )
    UpperCAmelCase = reduce(_snake_case , """sum""" )
    UpperCAmelCase = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(_snake_case , _snake_case ), F'''{reduced_tensor} != {truth_tensor}'''
def _a ( _snake_case ):
    """`reduce(..., "mean")` smoke test; only meaningful with exactly 2 processes.

    NOTE(review): mangled the same way as the sum variant — ``state``,
    ``reduced_tensor`` and ``truth_tensor`` are undefined as written.
    Restore names before running.
    """
    if state.num_processes != 2:
        return
    UpperCAmelCase = create_tensor(_snake_case )
    UpperCAmelCase = reduce(_snake_case , """mean""" )
    UpperCAmelCase = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(_snake_case , _snake_case ), F'''{reduced_tensor} != {truth_tensor}'''
def _a ( _snake_case ):
    """Launcher entry point: ignores its argument and runs the test suite.

    NOTE(review): ``main`` is defined below under a mangled name (``_a``),
    so this call fails as written; presumably this was the
    accelerate-launch hook ``test_op_checker(state)`` delegating to
    ``main()`` — confirm against the original script.
    """
    main()
def _a ( ):
    """Run all distributed-operation smoke tests under a PartialState.

    NOTE(review): mangled — the state is bound to a throwaway local so
    ``state`` is undefined, ``_snake_case`` is not a name in this scope, and
    the ``test_*`` helpers were all renamed ``_a`` above. Restore the
    original identifiers before running.
    """
    UpperCAmelCase = PartialState()
    state.print(F'''State: {state}''' )
    state.print("""testing gather""" )
    test_gather(_snake_case )
    state.print("""testing gather_object""" )
    test_gather_object(_snake_case )
    state.print("""testing broadcast""" )
    test_broadcast(_snake_case )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(_snake_case )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(_snake_case )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(_snake_case )
# Script entry point.
# NOTE(review): `main` is undefined here — the function above was renamed
# `_a` by the obfuscation.
if __name__ == "__main__":
    main()
| 74 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger (mangled name; presumably `logger`).
_UpperCamelCase = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")

# NOTE(review): both bindings below were mangled to `_UpperCamelCase`, so
# `MODEL_CONFIG_CLASSES` (read by the generator expression) is undefined —
# originally these were `MODEL_CONFIG_CLASSES` and `MODEL_TYPES`.
_UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase__ :
    """Data/training arguments for masked image modeling (run_mim).

    NOTE(review): every field was mangled to the single name
    ``SCREAMING_SNAKE_CASE`` and stripped of its type annotation, so
    ``@dataclass`` sees no fields at all and only the last binding survives;
    the help strings indicate the original names (dataset_name,
    dataset_config_name, image_column_name, train_dir, validation_dir,
    train_val_split, mask_patch_size, mask_ratio, max_train_samples,
    max_eval_samples). The method at the bottom (presumably
    ``__post_init__`` building ``self.data_files``) also assigns to a
    throwaway local. Restore before use.
    """

    SCREAMING_SNAKE_CASE = field(
        default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
    SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} )
    SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    # Presumably __post_init__: collect train/validation dirs into data_files.
    def _UpperCamelCase ( self ):
        UpperCAmelCase = {}
        if self.train_dir is not None:
            UpperCAmelCase = self.train_dir
        if self.validation_dir is not None:
            UpperCAmelCase = self.validation_dir
        UpperCAmelCase = data_files if data_files else None
@dataclass
class lowerCamelCase__ :
    """Model arguments for masked image modeling (run_mim).

    NOTE(review): as with the data arguments above, every field shares the
    mangled name ``SCREAMING_SNAKE_CASE`` with no type annotation, so
    ``@dataclass`` registers no fields. The help strings indicate the
    original names (model_name_or_path, model_type, config_name_or_path,
    config_overrides, cache_dir, model_revision, image_processor_name,
    use_auth_token, image_size, patch_size, encoder_stride). Restore
    before use.
    """

    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
                '''checkpoint identifier on the hub. '''
                '''Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
    SCREAMING_SNAKE_CASE = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={
            '''help''': (
                '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class lowerCamelCase__ :
    """Random boolean patch-mask generator for SimMIM-style pretraining.

    NOTE(review): ``__init__`` declares the parameter ``A`` four times —
    duplicate parameter names are a SyntaxError, so this does not compile
    (originally presumably ``input_size, mask_patch_size, model_patch_size,
    mask_ratio``); its assignments also bind a throwaway local instead of
    ``self.<attr>``, so the ``self.input_size`` etc. read below are never
    set. ``__call__``'s intent (visible from the surviving expressions):
    pick ``mask_count`` random patch tokens, mark them 1 in a flat
    zero-mask, reshape to the patch grid, then upsample by ``scale`` to the
    model-patch grid and return it flattened as a tensor.
    """

    def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ):
        UpperCAmelCase = input_size
        UpperCAmelCase = mask_patch_size
        UpperCAmelCase = model_patch_size
        UpperCAmelCase = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        UpperCAmelCase = self.input_size // self.mask_patch_size
        UpperCAmelCase = self.mask_patch_size // self.model_patch_size
        UpperCAmelCase = self.rand_size**2
        UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self ):
        UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count]
        UpperCAmelCase = np.zeros(self.token_count ,dtype=A )
        UpperCAmelCase = 1
        UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) )
        UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 )
        return torch.tensor(mask.flatten() )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] )
UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ):
    """Entry point: SimMIM-style masked-image-modeling pre-training.

    Parses (model, data, training) arguments, loads and optionally splits the
    image dataset, builds config + image processor + model, attaches the
    random-mask transform and collator, then trains/evaluates via ``Trainer``.

    NOTE(review): throughout this body results are bound to the throwaway
    name ``UpperCAmelCase`` and later read through the unbound name
    ``_snake_case`` — mechanical renaming damage; verify each statement
    against the upstream ``run_mim.py`` before executing.
    """
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mim""" , _snake_case , _snake_case )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    UpperCAmelCase = training_args.get_process_log_level()
    logger.setLevel(_snake_case )
    transformers.utils.logging.set_verbosity(_snake_case )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    UpperCAmelCase = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Initialize our dataset.
    UpperCAmelCase = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0:
        UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split )
        UpperCAmelCase = split["""train"""]
        UpperCAmelCase = split["""test"""]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case )
    elif model_args.model_name_or_path:
        UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
    else:
        UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(_snake_case , """decoder_type""" ):
        UpperCAmelCase = """simmim"""
    # adapt config
    UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size
    UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    UpperCAmelCase = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            """image_size""": model_args.image_size,
            """patch_size""": model_args.patch_size,
            """encoder_stride""": model_args.encoder_stride,
        } )
    # create image processor
    if model_args.image_processor_name:
        UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
    elif model_args.model_name_or_path:
        UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
    else:
        UpperCAmelCase = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case )
    if training_args.do_train:
        UpperCAmelCase = ds["""train"""].column_names
    else:
        UpperCAmelCase = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        UpperCAmelCase = data_args.image_column_name
    elif "image" in column_names:
        UpperCAmelCase = """image"""
    elif "img" in column_names:
        UpperCAmelCase = """img"""
    else:
        UpperCAmelCase = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    # NOTE(review): the Lambda below declares parameter ``_snake_case`` but its
    # body reads ``img`` — obfuscation damage.
    UpperCAmelCase = Compose(
        [
            Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    UpperCAmelCase = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(_snake_case ):
        # NOTE(review): the computed pixel values and masks are bound to a
        # throwaway name instead of ``examples[...]`` keys, so ``examples`` is
        # returned unchanged — looks like lost assignment targets; confirm
        # against the upstream script.
        UpperCAmelCase = [transforms(_snake_case ) for image in examples[image_column_name]]
        UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(_snake_case )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            UpperCAmelCase = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(_snake_case )
    # Initialize our trainer
    UpperCAmelCase = Trainer(
        model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
    # Training
    if training_args.do_train:
        UpperCAmelCase = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase = last_checkpoint
        UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        UpperCAmelCase = trainer.evaluate()
        trainer.log_metrics("""eval""" , _snake_case )
        trainer.save_metrics("""eval""" , _snake_case )
    # Write model card and (optionally) push to hub
    UpperCAmelCase = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**_snake_case )
    else:
        trainer.create_model_card(**_snake_case )


# NOTE(review): ``main`` is not defined in this file (the entry point above
# was renamed to ``_a`` by obfuscation) — reconcile before running.
if __name__ == "__main__":
    main()
| 74 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCamelCase__ ( snake_case ):
    """Audio-diffusion pipeline: UNet + scheduler + mel converter, with an
    optional VQ-VAE for latent diffusion.

    NOTE(review): several signatures below declare the parameter name ``A``
    multiple times (a SyntaxError) and many intermediate results are bound to
    the throwaway name ``UpperCAmelCase`` then read back under other names —
    mechanical renaming damage; compare against the upstream audio-diffusion
    pipeline before use.
    """

    # components that may legitimately be ``None``
    SCREAMING_SNAKE_CASE = ['''vqvae''']

    def __init__( self ,A ,A ,A ,A ,):
        super().__init__()
        self.register_modules(unet=A ,scheduler=A ,mel=A ,vqvae=A )

    def _UpperCamelCase ( self ):
        # DDIM needs far fewer inference steps than DDPM
        return 50 if isinstance(self.scheduler ,A ) else 1_000

    @torch.no_grad()
    def __call__( self ,A = 1 ,A = None ,A = None ,A = 0 ,A = 0 ,A = None ,A = None ,A = 0 ,A = 0 ,A = None ,A = 0 ,A = None ,A = None ,A=True ,):
        """Denoise (optionally from an input audio slice) and decode the
        resulting spectrogram images back to audio."""
        UpperCAmelCase = steps or self.get_default_steps()
        self.scheduler.set_timesteps(A )
        UpperCAmelCase = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            UpperCAmelCase = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) ,generator=A ,device=self.device ,)
        UpperCAmelCase = noise
        UpperCAmelCase = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(A ,A )
            UpperCAmelCase = self.mel.audio_slice_to_image(A )
            UpperCAmelCase = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            # rescale uint8 pixels to [-1, 1]
            UpperCAmelCase = (input_image / 255) * 2 - 1
            UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                # encode the conditioning image into the VQ-VAE latent space
                UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(A ,0 ) ).latent_dist.sample(
                    generator=A )[0]
                UpperCAmelCase = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                UpperCAmelCase = self.scheduler.add_noise(A ,A ,self.scheduler.timesteps[start_step - 1] )
            # horizontal pixels per second of audio, used to convert the
            # mask windows from seconds to columns
            UpperCAmelCase = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            UpperCAmelCase = int(mask_start_secs * pixels_per_second )
            UpperCAmelCase = int(mask_end_secs * pixels_per_second )
            UpperCAmelCase = self.scheduler.add_noise(A ,A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet ,A ):
                UpperCAmelCase = self.unet(A ,A ,A )["""sample"""]
            else:
                UpperCAmelCase = self.unet(A ,A )["""sample"""]
            if isinstance(self.scheduler ,A ):
                UpperCAmelCase = self.scheduler.step(
                    model_output=A ,timestep=A ,sample=A ,eta=A ,generator=A ,)["""prev_sample"""]
            else:
                UpperCAmelCase = self.scheduler.step(
                    model_output=A ,timestep=A ,sample=A ,generator=A ,)["""prev_sample"""]
            if mask is not None:
                # re-impose the unmasked spectrogram regions at every step
                # NOTE(review): the slice results are bound to a throwaway name
                # here — looks like lost assignment targets; confirm upstream.
                if mask_start > 0:
                    UpperCAmelCase = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    UpperCAmelCase = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images
            UpperCAmelCase = self.vqvae.decode(A )["""sample"""]
        UpperCAmelCase = (images / 2 + 0.5).clamp(0 ,1 )
        UpperCAmelCase = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        UpperCAmelCase = (images * 255).round().astype("""uint8""" )
        UpperCAmelCase = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(A ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
        UpperCAmelCase = [self.mel.image_to_audio(A ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(A ) )

    @torch.no_grad()
    def _UpperCamelCase ( self ,A ,A = 50 ):
        """DDIM inversion: encode spectrogram images back into noise."""
        assert isinstance(self.scheduler ,A )
        self.scheduler.set_timesteps(A )
        UpperCAmelCase = np.array(
            [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        UpperCAmelCase = (sample / 255) * 2 - 1
        UpperCAmelCase = torch.Tensor(A ).to(self.device )
        # walk the timesteps in reverse (inversion direction)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
            UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            UpperCAmelCase = self.scheduler.alphas_cumprod[t]
            UpperCAmelCase = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            UpperCAmelCase = 1 - alpha_prod_t
            UpperCAmelCase = self.unet(A ,A )["""sample"""]
            UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def _UpperCamelCase ( A ,A ,A ):
        """Spherical linear interpolation (slerp) between two tensors."""
        UpperCAmelCase = acos(torch.dot(torch.flatten(A ) ,torch.flatten(A ) ) / torch.norm(A ) / torch.norm(A ) )
        return sin((1 - alpha) * theta ) * xa / sin(A ) + sin(alpha * theta ) * xa / sin(A )
| 74 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCamelCase = True
from torch.cuda.amp import autocast
_UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__ :
    """Arguments selecting the model/config for wav2vec2 pretraining.

    NOTE(review): obfuscation collapsed every field name to
    ``SCREAMING_SNAKE_CASE`` and removed the type annotations, so
    ``@dataclass`` registers no fields and each assignment rebinds the same
    class attribute; ``snake_case`` used as a default is an unbound name here.
    Restore the original field names/annotations from the upstream
    pretraining script before use.
    """

    # path or hub id of the pretrained model (required)
    SCREAMING_SNAKE_CASE = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    # cache directory for downloaded models
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
    # gumbel-softmax temperature schedule: bounds and per-step decay
    SCREAMING_SNAKE_CASE = field(
        default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _a ( model_args , training_args ):
    """Configure logging verbosity for the wav2vec2 pretraining script.

    Args:
        model_args: parsed model arguments; ``verbose_logging`` switches the
            module logger to DEBUG.
        training_args: parsed training arguments; only the main process (per
            ``local_rank``) is raised to INFO, others stay at WARNING.
    """
    # NOTE(review): the obfuscated original declared two parameters with the
    # same name (a SyntaxError); names are restored from the attributes the
    # body reads and the positional call site in ``main``.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        # avoid duplicated INFO output from non-main distributed workers
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class lowerCamelCase__ :
    """Arguments selecting the dataset and preprocessing for pretraining.

    NOTE(review): as with the model arguments above, obfuscation collapsed the
    field names to ``SCREAMING_SNAKE_CASE`` and dropped the annotations, so no
    dataclass fields are actually registered and ``snake_case`` defaults are
    unbound names. ``main`` reads ``dataset_name``, ``dataset_config_name``,
    ``train_split_name``, ``validation_split_percentage``,
    ``speech_file_column``, ``overwrite_cache``, ``preprocessing_num_workers``
    and ``max_duration_in_seconds`` — restore those names before use.
    """

    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    SCREAMING_SNAKE_CASE = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    SCREAMING_SNAKE_CASE = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    SCREAMING_SNAKE_CASE = field(
        default=1 , metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    SCREAMING_SNAKE_CASE = field(
        default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class lowerCamelCase__ :
    """Data collator for wav2vec2 pretraining.

    Pads raw inputs, derives the reduced attention mask over the
    feature-extractor output length, and samples the masked time indices used
    by the contrastive objective.

    NOTE(review): the field names (the ``__call__`` body reads ``self.model``,
    ``self.feature_extractor``, ``self.padding``, ``self.max_length``,
    ``self.pad_to_multiple_of``) were collapsed to ``SCREAMING_SNAKE_CASE`` by
    obfuscation and must be restored; intermediate results are likewise bound
    to the throwaway name ``UpperCAmelCase``.
    """

    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = "longest"
    SCREAMING_SNAKE_CASE = None
    SCREAMING_SNAKE_CASE = None

    def __call__( self ,A ):
        # reformat list to dict and set to pytorch format
        UpperCAmelCase = self.feature_extractor.pad(
            A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
        # sequence length of the feature-extractor output for padded inputs
        UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
        UpperCAmelCase = batch["""input_values"""].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
                torch.long )
            UpperCAmelCase = torch.zeros(
                (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            UpperCAmelCase = 1
            UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        UpperCAmelCase = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A ,min_masks=2 ,)
        return batch
class lowerCamelCase__ ( snake_case ):
    """``Trainer`` subclass for wav2vec2 pretraining that decays the
    gumbel-softmax temperature after every update step.
    """

    def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ):
        # NOTE(review): the keyword parameters were all renamed to ``A``
        # (duplicate names — a SyntaxError) and the ``self.`` targets were
        # dropped; the bodies below read ``self.max_gumbel_temp``,
        # ``self.min_gumbel_temp`` and ``self.gumbel_temp_decay``.
        super().__init__(*A ,**A )
        UpperCAmelCase = 0
        UpperCAmelCase = max_gumbel_temp
        UpperCAmelCase = min_gumbel_temp
        UpperCAmelCase = gumbel_temp_decay

    def _UpperCamelCase ( self ,A ,A ):
        """One training step: forward, (scaled) backward, gumbel-temp decay."""
        model.train()
        UpperCAmelCase = self._prepare_inputs(A )
        if self.use_amp:
            with autocast():
                UpperCAmelCase = self.compute_loss(A ,A )
        else:
            UpperCAmelCase = self.compute_loss(A ,A )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                UpperCAmelCase = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # normalize by the number of masked time indices
                UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            UpperCAmelCase = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(A ).backward()
        elif self.use_apex:
            with amp.scale_loss(A ,self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(A )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
        return loss.detach()
def _a ( ):
    """Entry point for wav2vec2 pretraining.

    Parses arguments, loads (and if needed splits) the audio dataset,
    vectorizes/filters/normalizes it, then builds the model, collator and
    trainer and starts training.

    NOTE(review): the body binds results to the throwaway name
    ``UpperCAmelCase`` and calls names (``configure_logger``,
    ``DataCollatorForWavaVecaPretraining``, ``WavaVecaPreTrainer``) whose
    definitions in this file were renamed by obfuscation (``_a``,
    ``lowerCamelCase__``) — reconcile against the upstream script before
    running.
    """
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    configure_logger(_snake_case , _snake_case )
    # Downloading and loading a dataset from the hub.
    UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        UpperCAmelCase = DatasetDict()
        UpperCAmelCase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
        UpperCAmelCase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        UpperCAmelCase = DatasetDict()
        UpperCAmelCase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
        UpperCAmelCase = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case )
    def prepare_dataset(_snake_case ):
        # check that all files have the correct sampling rate
        UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    UpperCAmelCase = datasets.map(
        _snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
    # filter audio files that are too long
    # NOTE(review): the lambda declares ``_snake_case`` but reads ``data`` —
    # obfuscation damage.
    UpperCAmelCase = vectorized_datasets.filter(
        lambda _snake_case : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(_snake_case ):
        return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    UpperCAmelCase = vectorized_datasets.map(
        _snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    UpperCAmelCase = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
            """ ``config.feat_extract_norm='layer'""" )
    UpperCAmelCase = WavaVecaForPreTraining(_snake_case )
    UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case )
    UpperCAmelCase = WavaVecaPreTrainer(
        model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()


# NOTE(review): ``main`` is not defined in this file (the entry point above
# was renamed to ``_a`` by obfuscation) — reconcile before running.
if __name__ == "__main__":
    main()
| 74 | 1 |
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 100_0000 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCamelCase__ :
    """Builds small ``TransfoXLConfig``s and random inputs for the TF
    Transfo-XL model tests below, and runs the per-model checks.

    NOTE(review): the attribute assignments in ``__init__`` were collapsed to
    the throwaway name ``UpperCAmelCase`` by obfuscation, so the attributes
    read later (``self.seq_length``, ``self.mem_len``, ``self.vocab_size``,
    ...) are never actually set — restore ``self.<attr> = ...`` from the
    upstream test file before running.
    """

    def __init__( self ,A ,):
        UpperCAmelCase = parent
        UpperCAmelCase = 13
        UpperCAmelCase = 7
        UpperCAmelCase = 30
        UpperCAmelCase = self.seq_length + self.mem_len
        UpperCAmelCase = 15
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = 99
        UpperCAmelCase = [10, 50, 80]
        UpperCAmelCase = 32
        UpperCAmelCase = 32
        UpperCAmelCase = 4
        UpperCAmelCase = 8
        UpperCAmelCase = 128
        UpperCAmelCase = 2
        UpperCAmelCase = 2
        UpperCAmelCase = None
        UpperCAmelCase = 1
        UpperCAmelCase = 0
        UpperCAmelCase = 3
        UpperCAmelCase = self.vocab_size - 1
        UpperCAmelCase = 0.01

    def _UpperCamelCase ( self ):
        """Build a config and random input/label id tensors."""
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        UpperCAmelCase = TransfoXLConfig(
            vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
        return (config, input_ids_a, input_ids_a, lm_labels)

    def _UpperCamelCase ( self ):
        # seed both Python and TF RNGs for reproducible test inputs
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def _UpperCamelCase ( self ,A ,A ,A ,A ):
        """Check the base model's hidden-state and memory shapes."""
        UpperCAmelCase = TFTransfoXLModel(A )
        UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
        UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
        UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)

    def _UpperCamelCase ( self ,A ,A ,A ,A ):
        """Check the LM-head model's logits and memory shapes."""
        UpperCAmelCase = TFTransfoXLLMHeadModel(A )
        UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
        UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
        UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
        UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
        UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
        UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
        self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)

    def _UpperCamelCase ( self ,A ,A ,A ,A ):
        """Check the sequence-classification head's logit shape."""
        UpperCAmelCase = TFTransfoXLForSequenceClassification(A )
        UpperCAmelCase = model(A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def _UpperCamelCase ( self ):
        """Return (config, inputs_dict) for the common test mixin."""
        UpperCAmelCase = self.prepare_config_and_inputs()
        ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
        UpperCAmelCase = {"""input_ids""": input_ids_a}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """Common model-tester suite for the TF Transfo-XL model classes."""

    SCREAMING_SNAKE_CASE = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    SCREAMING_SNAKE_CASE = () if is_tf_available() else ()
    SCREAMING_SNAKE_CASE = (
        {
            '''feature-extraction''': TFTransfoXLModel,
            '''text-classification''': TFTransfoXLForSequenceClassification,
            '''text-generation''': TFTransfoXLLMHeadModel,
            '''zero-shot''': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False

    def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
        """Return True for pipeline cases that are known to fail and must be skipped."""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def _UpperCamelCase ( self ):
        UpperCAmelCase = TFTransfoXLModelTester(self )
        UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 )

    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ):
        self.model_tester.set_seed()
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*A )

    def _UpperCamelCase ( self ):
        self.model_tester.set_seed()
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*A )

    def _UpperCamelCase ( self ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )

    def _UpperCamelCase ( self ):
        """Check input/output embedding types for every model class."""
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        # only the sequence-classification model exposes output embeddings
        UpperCAmelCase = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(A )
            assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                UpperCAmelCase = model.get_output_embeddings()
                assert isinstance(A ,tf.keras.layers.Layer )
                UpperCAmelCase = model.get_bias()
                assert name is None
            else:
                UpperCAmelCase = model.get_output_embeddings()
                assert x is None
                UpperCAmelCase = model.get_bias()
                assert name is None

    def _UpperCamelCase ( self ):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def _UpperCamelCase ( self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = TFTransfoXLModel.from_pretrained(A )
            self.assertIsNotNone(A )

    @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
    def _UpperCamelCase ( self ):
        pass
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test: greedy generation with the pretrained transfo-xl-wt103 LM."""

    @unittest.skip("""Skip test until #12651 is resolved.""" )
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        """Generate 200 tokens greedily and compare against the known reference output.

        Fixes from the obfuscated original: method renamed for unittest discovery,
        undefined `A` replaced by real locals, nonexistent `tf.intaa` -> `tf.int32`,
        `do_sample=A` -> `do_sample=False` (the expected ids are the greedy output).
        """
        model = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.int32 )  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test for the pretrained TF XLM-RoBERTa base model."""

    @slow
    def test_output_embeds_base_model(self):
        """Check output shape and a 3x3 slice of the last hidden state.

        Fixes from the obfuscated original: method renamed for unittest discovery,
        undefined `A` replaced with real locals, nonexistent `tf.intaa`/`tf.floataa`
        replaced with `tf.int32`/`tf.float32`.
        """
        model = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
        features = {
            """input_ids""": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]], dtype=tf.int32),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 74 |
"""simple docstring"""
from math import sqrt
def _a ( _snake_case = 100_0000 ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
    # The original printed an undefined `solution`; the solver above is `_a`.
    print(F"""{_a() = }""")
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class lowerCamelCase__ :
    """A univariate polynomial; ``coefficients[i]`` is the coefficient of x**i.

    Fixes from the obfuscated original: duplicate ``A ,A`` parameters (a
    SyntaxError), references to an undefined ``Polynomial`` name (this class is
    named ``lowerCamelCase__``), clobbered subscript assignments, and three
    methods all named ``_UpperCamelCase`` (restored to ``evaluate``,
    ``derivative``, ``integral`` so none is shadowed).
    """

    def __init__(self, degree, coefficients):
        """Store `degree` and a defensive copy of the degree+1 coefficients."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        # Copy the longer coefficient list and add the shorter one into it.
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return lowerCamelCase__(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return lowerCamelCase__(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        # a - b == a + (-1) * b
        return self + polynomial_a * lowerCamelCase__(0, [-1])

    def __neg__(self):
        return lowerCamelCase__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        # Schoolbook convolution of the coefficient lists.
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return lowerCamelCase__(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at `substitution`."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = """"""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        """Return the first derivative as a new polynomial of degree-1."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return lowerCamelCase__(self.degree - 1, coefficients)

    def integral(self, constant=0):
        """Return the antiderivative with integration constant `constant`."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return lowerCamelCase__(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, lowerCamelCase__):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
| 74 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring renames applied to CLAP checkpoint keys; `rename_state_dict` below
# iterates this mapping (the obfuscated original bound it to a throwaway name,
# leaving `KEYS_TO_MODIFY_MAPPING` undefined at its use site).
KEYS_TO_MODIFY_MAPPING = {
    """text_branch""": """text_model""",
    """audio_branch""": """audio_model.audio_encoder""",
    """attn""": """attention.self""",
    """self.proj""": """output.dense""",
    """attention.self_mask""": """attn_mask""",
    """mlp.fc1""": """intermediate.dense""",
    """mlp.fc2""": """output.dense""",
    """norm1""": """layernorm_before""",
    """norm2""": """layernorm_after""",
    """bn0""": """batch_norm""",
}

# NOTE(review): loaded at import time as in the original; not referenced in this
# view — presumably kept for the saved processor, confirm before removing.
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original CLAP model (HTSAT-tiny audio + roberta text).

    Renamed from the obfuscated `_a` (three functions shared that name and
    shadowed each other): `convert_clap_checkpoint` calls `init_clap`.
    The original also unpacked into one clobbered name and returned undefined
    `model, model_cfg`.
    """
    model, model_cfg = create_model(
        """HTSAT-tiny""" ,
        """roberta""" ,
        checkpoint_path,
        precision="""fp32""" ,
        device="""cuda:0""" if torch.cuda.is_available() else """cpu""" ,
        enable_fusion=enable_fusion,
        fusion_type="""aff_2d""" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """Map original CLAP state-dict keys to HF ClapModel keys.

    Renamed from the obfuscated `_a` (shadowed by later defs); all clobbered
    locals restored from the names the body referenced. Also fixes the
    truthiness bug `if "audio" and "qkv" in key` -> test both substrings.
    """
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F'''sequential.{sequential_layer}.''', F'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'''_projection.{projecton_layer}.''', F'''_projection.linear{transformers_projection_layer}.''')
        if "audio" in key and "qkv" in key:
            # split fused qkv into separate query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert an original CLAP checkpoint into a saved HF ClapModel + config.

    Renamed from the obfuscated `_a`, whose four parameters all shared the name
    `_snake_case` (a SyntaxError) and whose body referenced undefined
    `init_clap`/`rename_state_dict` — the `__main__` block calls this name.
    """
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # NOTE(review): upstream sets fusion on the audio sub-config — confirm.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The original bound the parser to a throwaway name, leaving `parser` and
    # `args` undefined at their use sites.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 74 | 1 |
"""simple docstring"""
class lowerCamelCase__ :
    """Undirected weighted graph stored as a nested adjacency dict.

    ``self.adjacency[u][v]`` holds the weight of edge (u, v); every edge is
    mirrored under both endpoints. Method names are restored from the call
    sites in this file (`add_vertex`, `add_edge`, `get_edges`, `get_vertices`,
    `build`); the obfuscated original named them all `_UpperCamelCase`.
    """

    def __init__(self):
        self.num_vertices = 0  # count of distinct vertices added
        self.num_edges = 0  # NOTE(review): second zeroed counter in the original; never updated in this view
        self.adjacency = {}  # vertex -> {neighbour: weight}

    def add_vertex(self, vertex):
        """Register `vertex` if unseen (idempotent)."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add the undirected edge (head, tail); self-loops are silently ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so all edge weights become distinct
        (precondition for Borůvka's MST algorithm)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            # drop the mirrored duplicate of each undirected edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("""\n""" )

    def get_edges(self):
        """Return every (tail, head, weight) triple; each edge appears twice,
        once per direction."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @classmethod
    def build(cls, vertices=None, edges=None):
        """Construct a graph from iterables of vertices and (head, tail, weight) edges.

        Converted from @staticmethod (whose two parameters both named `A` were a
        SyntaxError, and which constructed an undefined `Graph`) to @classmethod
        so `cls()` reaches the constructor; call sites `SomeGraph.build(...)`
        are unaffected.
        """
        g = cls()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class lowerCamelCase__ :
    """Disjoint-set (union-find) with path compression and union by rank,
    plus Borůvka's minimum-spanning-tree algorithm built on top of it.

    Method names restored from the call sites in this file (`find`, `make_set`,
    `union`); the obfuscated original named them all `_UpperCamelCase`.
    """

    def __init__(self):
        self.parent = {}  # item -> parent item (roots point to themselves)
        self.rank = {}  # root -> rank upper bound

    def __len__(self):
        return len(self.parent )

    def make_set(self, item):
        """Create a singleton set for `item`; idempotent for known items."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of `item`'s set, compressing the path."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets containing item1 and item2 by rank; return the new root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @classmethod
    def boruvka_mst(cls, graph):
        """Return the minimum spanning tree of `graph` (weights must be distinct).

        Converted from @staticmethod to @classmethod: the original built the
        union-find via an undefined `Graph.UnionFind` name — `cls()` is this
        class. The result graph is rebuilt via `type(graph).build(...)` since
        `Graph` is likewise undefined at module level.
        """
        num_components = graph.num_vertices
        union_find = cls()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                # drop the mirrored duplicate of each undirected edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    # remember the cheapest outgoing edge of each component
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = type(graph).build(edges=mst_edges)
        return mst
| 74 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (the obfuscated original bound the path to a throwaway name, leaving
# `git_repo_path` undefined on the next line)
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register shared diffusers CLI options (e.g. --make-reports).

    Renamed from the obfuscated `_a` — pytest looks up hooks by the exact names
    `pytest_addoption`/`pytest_terminal_summary` and calls them with keyword
    arguments matching the hookspec, so both the function and parameter names
    must be restored for the hook to run at all.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the shared diffusers report files when --make-reports is set.

    Renamed from the obfuscated `_a`; the body already referenced
    `terminalreporter`, pinning the hookspec parameter name, and the report id
    is the value of the --make-reports option.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 74 | 1 |
"""simple docstring"""
import os
import sys
import unittest
# The obfuscated original bound the repo path to a throwaway name, leaving
# `git_repo_path` undefined below, and dropped the attribute target of the
# final path assignment.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): upstream diffusers assigns this to `check_dummies.PATH_TO_DIFFUSERS` — confirm the attribute name.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, """src""", """diffusers""")
class lowerCamelCase__ ( unittest.TestCase ):
    """Unit tests for the `check_dummies` utility script.

    The obfuscated original named all four methods `_UpperCamelCase` (so three
    were shadowed and none was discovered by unittest) and referenced undefined
    locals (`A`, `objects`, `dummy_files`); names restored here.
    """

    def test_find_backend(self):
        """`find_backend` extracts the backend suffix from an availability guard line."""
        simple_backend = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(simple_backend, """torch""" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(double_backend, """torch_and_transformers""" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(triple_backend, """torch_and_transformers_and_onnx""" )

    def test_read_init(self):
        """`read_init` maps backends to the object names guarded behind them."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", objects)
        self.assertIn("""torch_and_transformers""", objects)
        self.assertIn("""flax_and_transformers""", objects)
        self.assertIn("""torch_and_transformers_and_onnx""", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""UNet2DModel""", objects["""torch"""])
        self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""])
        self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""])
        self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""])
        self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""])
        self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""])

    def test_create_dummy_object(self):
        """`create_dummy_object` emits a placeholder constant, function, or class."""
        dummy_constant = create_dummy_object("""CONSTANT""", """'torch'""" )
        self.assertEqual(dummy_constant, """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""", """'torch'""" )
        self.assertEqual(
            dummy_function, """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        # NOTE(review): expected text reconstructed from upstream diffusers; the
        # obfuscated source may have lost in-string indentation — confirm.
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""", """'torch'""" )
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        """`create_dummy_files` renders a full dummy module per backend."""
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], expected_dummy_pytorch_file)
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class lowerCamelCase__ :
    """A univariate polynomial; ``coefficients[i]`` is the coefficient of x**i.

    Fixes from the obfuscated original: duplicate ``A ,A`` parameters (a
    SyntaxError), references to an undefined ``Polynomial`` name (this class is
    named ``lowerCamelCase__``), clobbered subscript assignments, and three
    methods all named ``_UpperCamelCase`` (restored to ``evaluate``,
    ``derivative``, ``integral`` so none is shadowed).
    """

    def __init__(self, degree, coefficients):
        """Store `degree` and a defensive copy of the degree+1 coefficients."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        # Copy the longer coefficient list and add the shorter one into it.
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return lowerCamelCase__(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return lowerCamelCase__(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        # a - b == a + (-1) * b
        return self + polynomial_a * lowerCamelCase__(0, [-1])

    def __neg__(self):
        return lowerCamelCase__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        # Schoolbook convolution of the coefficient lists.
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return lowerCamelCase__(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at `substitution`."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = """"""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        """Return the first derivative as a new polynomial of degree-1."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return lowerCamelCase__(self.degree - 1, coefficients)

    def integral(self, constant=0):
        """Return the antiderivative with integration constant `constant`."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return lowerCamelCase__(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, lowerCamelCase__):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
| 74 | 1 |
"""simple docstring"""
class lowerCamelCase__ :
    """Maximum sub-array sum over a comma-separated string of integers.

    Fixes from the obfuscated original: the constructor parameter was `A` but
    the body read `arr`; `solve_sub_array` was named `_UpperCamelCase` although
    the `__main__` block calls `solve_sub_array`; the DP subscript assignments
    were clobbered into one throwaway name.
    """

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(""",""" )

    def solve_sub_array(self):
        """Kadane-style DP: `sum_value[i]` is the best sum ending at i,
        `rear[i]` the best sum seen through i; return the overall best."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    # The original constructed an undefined `SubArray` and read undefined
    # locals; the class above is `lowerCamelCase__`.
    whole_array = input("""please input some numbers:""" )
    array = lowerCamelCase__(whole_array)
    re = array.solve_sub_array()
    print(("""the results is:""", re))
| 74 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase__ :
    """Builds a tiny FlaubertConfig plus random inputs and runs per-head shape
    checks for the TF Flaubert model family.

    The obfuscated original clobbered every attribute/local into one name and
    named every method `_UpperCamelCase`; names restored from the `self.*`
    references below, the FlaubertConfig keyword arguments, and the call sites
    in the test class (e.g. `create_and_check_flaubert_model`). Nonexistent
    `tf.floataa` replaced with `tf.float32`.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = """last"""
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels,
        input_mask) built from the tiny settings above."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            # NOTE: choice_labels is only bound when use_labels is True (as in
            # the original control flow); use_labels defaults to True above.
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Base model accepts both dict and list inputs; check hidden-state shape."""
        model = TFFlaubertModel(config=config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        # Tile each input to (batch, num_choices, seq) as the MC head expects.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common + pipeline test harness for the TF Flaubert model family.

    Fixes from the obfuscated original: base classes were the undefined name
    `snake_case` (the file imports `TFModelTesterMixin` and
    `PipelineTesterMixin`); the five class attributes all shared the name
    `SCREAMING_SNAKE_CASE` (so four were shadowed); test methods were named
    `_UpperCamelCase` and thus never discovered.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        # NOTE(review): the tester class above carries an obfuscated name;
        # `TFFlaubertModelTester` must alias it for this to resolve — confirm.
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test for the pretrained TF Flaubert small model."""

    @slow
    def test_output_embeds_base_model(self):
        """Check output shape and a 3x3 slice of the last hidden state.

        Fixes from the obfuscated original: method renamed for unittest
        discovery, undefined `A` replaced with real locals, nonexistent
        `tf.intaa`/`tf.floataa` replaced with `tf.int32`/`tf.float32`.
        """
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 74 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(snake_case )
class lowerCamelCase__ ( snake_case ):
    """Image-classification pipeline: assigns labels (with scores) to an image using any
    image-classification checkpoint, in either the PyTorch or TensorFlow framework.

    The original obfuscated body repeated parameter names (a SyntaxError) and referenced
    undefined locals (`top_k`, `scores`, `ids`, `postprocess_params`); these are restored.
    """

    def __init__( self ,*args ,**kwargs ):
        super().__init__(*args ,**kwargs )
        requires_backends(self ,"""vision""" )
        # Restrict usable checkpoints to image-classification heads for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _UpperCamelCase ( self ,top_k=None ):
        """Split pipeline kwargs into (preprocess, forward, postprocess) parameter dicts."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params

    def __call__( self ,images ,**kwargs ):
        """Classify the given image(s); accepts URLs, local paths or PIL images."""
        return super().__call__(images ,**kwargs )

    def _UpperCamelCase ( self ,image ):
        """Load the image and convert it to framework tensors."""
        image = load_image(image )
        model_inputs = self.image_processor(images=image ,return_tensors=self.framework )
        return model_inputs

    def _UpperCamelCase ( self ,model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def _UpperCamelCase ( self ,model_outputs ,top_k=5 ):
        """Convert logits into the `top_k` best {"score", "label"} dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits ,axis=-1 )[0]
            topk = tf.math.top_k(probs ,k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        # `idalabel` was the obfuscated spelling of the config's id2label mapping.
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores ,ids )]
| 74 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
UpperCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
UpperCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCAmelCase = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_snake_case )-1}''' )
if "norm" in key:
UpperCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
UpperCAmelCase = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_snake_case )-1}''' )
if "layer_norm1" in key:
UpperCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase = key[key.find("""block""" ) + len("""block""" )]
UpperCAmelCase = key.replace(F'''block{idx}''' , F'''block.{int(_snake_case )-1}''' )
if "attn.q" in key:
UpperCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCAmelCase = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_snake_case )-1}''' )
if "bot_conv" in key:
UpperCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
UpperCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
UpperCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
UpperCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
UpperCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
UpperCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
UpperCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
UpperCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
UpperCAmelCase = value
return new_state_dict
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase = kv_bias[config.hidden_sizes[i] :]
def _a ( ):
    """Download the standard COCO cats test image used to verify the converted model.

    The original passed the undefined name ``_snake_case`` both as the URL and as the
    ``stream`` flag; the URL local and ``stream=True`` are restored.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    return Image.open(requests.get(url , stream=True ).raw )
@torch.no_grad()
def _a ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """Convert an original GLPN checkpoint to the HF format.

    Loads the original ``.pth`` state dict, renames its keys, splits the fused kv
    projections, verifies a forward pass against reference depth values, and optionally
    pushes model + image processor to the hub. (The obfuscated signature repeated
    ``_snake_case`` four times — a SyntaxError — and most locals were discarded.)
    """
    # NOTE(review): rename_keys / read_in_k_v / prepare_img are the helper functions
    # defined above (all obfuscated to `_a` in this file) — confirm their bindings.
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    # CLI wrapper around the conversion function defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_path""",
        default=None,
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )
    parser.add_argument(
        """--model_name""",
        default="""glpn-kitti""",
        type=str,
        help="""Name of the model in case you're pushing to the hub.""",
    )
    args = parser.parse_args()
    # `_a` is the conversion entry point defined above (obfuscated name for
    # convert_glpn_checkpoint, which this module never defines under that name).
    _a(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 74 | 1 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__ ( snake_case , snake_case ):
    """Improved Pseudo Numerical Methods for Diffusion Models (iPNDM) scheduler: a
    fourth-order multistep method (see https://arxiv.org/pdf/2202.09778.pdf, Algorithm 2).

    The obfuscated original repeated parameter names (SyntaxError), dropped every
    ``self.`` attribute assignment and misspelled ``torch.atan2``; methods are restored
    to the names their internal call sites expect (``set_timesteps``, ``_get_prev_sample``).
    """

    # Solver order exposed to pipelines (obfuscated name for `order`).
    SCREAMING_SNAKE_CASE = 1

    @register_to_config
    def __init__( self ,num_train_timesteps = 1_000 ,trained_betas = None ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps( self ,num_inference_steps ,device = None ):
        """Precompute betas/alphas and the timestep schedule for `num_inference_steps`."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        # `torch.atana` was an obfuscated torch.atan2.
        timesteps = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []

    def step( self ,model_output ,timestep ,sample ,return_dict = True ,):
        """Propagate the sample one step with the linear multistep update.

        Returns a `SchedulerOutput` (or a one-tuple when `return_dict` is False).
        """
        if self.num_inference_steps is None:
            raise ValueError(
                """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        # Linear multistep coefficients for orders 1..4 (Algorithm 2 of the paper).
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self ,sample ,*args ,**kwargs ):
        """Identity: iPNDM does not rescale model inputs."""
        return sample

    def _get_prev_sample( self ,sample ,timestep_index ,prev_timestep_index ,ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        # Guard against division by ~0 at the final (alpha -> 0) step.
        pred = (sample - sigma * ets) / max(alpha ,1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self ):
        return self.config.num_train_timesteps
| 74 |
"""simple docstring"""
def _a ( _snake_case ): # noqa: E741
"""simple docstring"""
UpperCAmelCase = len(_snake_case )
UpperCAmelCase = 0
UpperCAmelCase = [0] * n
UpperCAmelCase = [False] * n
UpperCAmelCase = [False] * n
def dfs(_snake_case , _snake_case , _snake_case , _snake_case ):
if parent == root:
out_edge_count += 1
UpperCAmelCase = True
UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case )
UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
UpperCAmelCase = True
else:
UpperCAmelCase = min(low[at] , _snake_case )
return out_edge_count
for i in range(_snake_case ):
if not visited[i]:
UpperCAmelCase = 0
UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case )
UpperCAmelCase = out_edge_count > 1
for x in range(len(_snake_case ) ):
if is_art[x] is True:
print(_snake_case )
# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
# `_a` is the articulation-point routine defined above (this module never defines
# a `compute_ap` name, and the dict was bound to an unrelated obfuscated name).
_a(data)
| 74 | 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _a ( main_process_only = True , *args , **kwargs ):
    """tqdm wrapper that, by default, only shows a progress bar on the main local process.

    Extra positional/keyword arguments are forwarded to `tqdm`. The obfuscated original
    repeated `_snake_case` three times in the signature (a SyntaxError).
    """
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # Disable the bar on every process except local rank 0 (the original compared
        # `== 0`, which would have silenced the *main* process instead).
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 74 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
    """Tokenizer test-suite for DeBERTa-v2 (slow SentencePiece and fast implementations).

    NOTE(review): this file is machine-obfuscated — many call sites pass the undefined
    placeholder `A` (where the original had the SAMPLE_VOCAB path or True/False flags)
    and bind results to the reused local `UpperCAmelCase`, so later references
    (`tokenizer`, `rust_tokenizer`, `input_text`, ...) are unbound. The comments below
    describe intent only; the original constants must be restored before running.
    """

    # Tokenizer classes and capability flags consumed by TokenizerTesterMixin
    # (all four were obfuscated to the same attribute name).
    SCREAMING_SNAKE_CASE = DebertaVaTokenizer
    SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast
    SCREAMING_SNAKE_CASE = True
    SCREAMING_SNAKE_CASE = True

    def _UpperCamelCase ( self ):
        # setUp: build a tokenizer from the SentencePiece fixture and cache it on disk.
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase = DebertaVaTokenizer(A ,unk_token="""<unk>""" )
        tokenizer.save_pretrained(self.tmpdirname )

    def _UpperCamelCase ( self ,A ):
        # get_input_output_texts: identity pair used by the common tokenizer tests.
        UpperCAmelCase = """this is a test"""
        UpperCAmelCase = """this is a test"""
        return input_text, output_text

    def _UpperCamelCase ( self ):
        # token <-> id conversion for the pad token.
        UpperCAmelCase = """<pad>"""
        UpperCAmelCase = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )

    def _UpperCamelCase ( self ):
        # vocab ordering and size sanity checks.
        UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,"""<pad>""" )
        self.assertEqual(vocab_keys[1] ,"""<unk>""" )
        self.assertEqual(vocab_keys[-1] ,"""[PAD]""" )
        self.assertEqual(len(A ) ,30_001 )

    def _UpperCamelCase ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size ,30_000 )

    def _UpperCamelCase ( self ):
        # do_lower_case behaviour, slow vs fast.
        # fmt: off
        UpperCAmelCase = """ \tHeLLo!how  \n Are yoU?  """
        UpperCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
        # fmt: on
        UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )

    @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
    def _UpperCamelCase ( self ):
        pass

    @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
    def _UpperCamelCase ( self ):
        pass

    def _UpperCamelCase ( self ):
        # split_by_punct behaviour, slow vs fast.
        # fmt: off
        UpperCAmelCase = """I was born in 92000, and this is falsé."""
        UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
        # fmt: on
        UpperCAmelCase = DebertaVaTokenizer(A ,split_by_punct=A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,split_by_punct=A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # do_lower_case + split_by_punct combination.
        # fmt: off
        UpperCAmelCase = """I was born in 92000, and this is falsé."""
        UpperCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
        # fmt: on
        UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # do_lower_case with split_by_punct disabled.
        # fmt: off
        UpperCAmelCase = """I was born in 92000, and this is falsé."""
        UpperCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
        # fmt: on
        UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # cased input with split_by_punct enabled.
        # fmt: off
        UpperCAmelCase = """I was born in 92000, and this is falsé."""
        UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
        # fmt: on
        UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # cased, whitespace-heavy input, slow vs fast.
        # fmt: off
        UpperCAmelCase = """ \tHeLLo!how  \n Are yoU?  """
        UpperCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
        # fmt: on
        UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # slow and fast tokenizers must agree on tokens and ids.
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = """I was born in 92000, and this is falsé."""
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
        self.assertListEqual(A ,A )
        UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
        UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = tokenizer.encode(A )
        UpperCAmelCase = rust_tokenizer.encode(A )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # full tokenizer behaviour: ids, tokens and round-trips, with/without accents.
        UpperCAmelCase = """This is a test"""
        UpperCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
        UpperCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
        UpperCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
        UpperCAmelCase = DebertaVaTokenizer(A ,keep_accents=A )
        UpperCAmelCase = DebertaVaTokenizerFast(A ,keep_accents=A )
        UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = tokenizer.tokenize(A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = rust_tokenizer.tokenize(A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(A )
        self.assertListEqual(A ,A )
        # fmt: off
        UpperCAmelCase = """I was born in 92000, and this is falsé."""
        UpperCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        UpperCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
        UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
        # fmt: on
        UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = tokenizer.tokenize(A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = tokenizer.convert_ids_to_tokens(A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = rust_tokenizer.tokenize(A )
        self.assertListEqual(A ,A )
        UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(A )
        self.assertListEqual(A ,A )

    def _UpperCamelCase ( self ):
        # build_inputs_with_special_tokens: [CLS] seq [SEP] (+ second seq [SEP]).
        UpperCAmelCase = DebertaVaTokenizer(A )
        UpperCAmelCase = tokenizer.encode("""sequence builders""" )
        UpperCAmelCase = tokenizer.encode("""multi-sequence build""" )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A ,A )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,A )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,A ,)

    @slow
    def _UpperCamelCase ( self ):
        # Integration test against the hub checkpoint with a frozen expected encoding.
        # fmt: off
        UpperCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A ,model_name="""microsoft/deberta-v2-xlarge""" ,revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" ,)
| 74 |
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCamelCase__ :
def __init__( self ,A ):
UpperCAmelCase = data
# Initialize hash values
UpperCAmelCase = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
UpperCAmelCase = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
UpperCAmelCase = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( A ):
UpperCAmelCase = b"""\x80""" + (b"""\x00""" * (63 - (len(A ) + 8) % 64))
UpperCAmelCase = struct.pack(""">Q""" ,(len(A ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self ):
# Convert into blocks of 64 bytes
UpperCAmelCase = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCAmelCase = list(struct.unpack(""">16L""" ,A ) )
# add 48 0-ed integers
words += [0] * 48
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCAmelCase = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
UpperCAmelCase = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
UpperCAmelCase = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
UpperCAmelCase = self.ror(A ,6 ) ^ self.ror(A ,11 ) ^ self.ror(A ,25 )
UpperCAmelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
UpperCAmelCase = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
UpperCAmelCase = self.ror(A ,2 ) ^ self.ror(A ,13 ) ^ self.ror(A ,22 )
UpperCAmelCase = (a & b) ^ (a & c) ^ (b & c)
UpperCAmelCase = (sa + maj) % 0x1_00_00_00_00
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
UpperCAmelCase = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCAmelCase = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
UpperCAmelCase = """""".join([hex(A )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self ,A ,A ):
return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase__ ( unittest.TestCase ):
    """Unit test comparing this SHA-256 implementation against hashlib."""

    def _UpperCamelCase ( self ):
        # NOTE(review): `SHAaaa` / `hashlib.shaaaa` are obfuscated spellings of
        # SHA256 / hashlib.sha256, and `A` is an undefined placeholder for the local
        # byte string — neither name exists in this file as written; confirm and
        # restore the originals before running.
        import hashlib
        UpperCAmelCase = bytes("""Test String""" ,"""utf-8""" )
        self.assertEqual(SHAaaa(A ).hash ,hashlib.shaaaa(A ).hexdigest() )
def _a ( ):
    """CLI entry point: hash a string (default) or the contents of a file with SHA-256.

    The obfuscated original bound the parser/args/input to throwaway locals and then
    referenced them by their real names; those bindings are restored.
    """
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    # NOTE(review): `SHAaaa` (obfuscated SHA256) is bound to `lowerCamelCase__` in this
    # file — confirm which name the implementation class actually carries.
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
    # `_a` is the CLI entry point defined directly above (this module never defines
    # a function named `main`).
    _a()
| 74 | 1 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCamelCase__(unittest.TestCase):
    """Unit tests for the backbone output-feature / output-index helpers.

    NOTE(review): in the original, all three test methods shared the single
    name ``_UpperCamelCase`` (so only the last definition survived and none
    were collected by unittest), and an undefined name ``A`` appeared where
    ``None`` / ``ValueError`` belonged. Both problems are fixed below; the
    asserted values are unchanged.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        # presumably these are the attributes the mixin's properties read
        # — TODO confirm against BackboneMixin's implementation
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 74 |
"""simple docstring"""
def _a ( _snake_case = 10 , _snake_case = 22 ):
"""simple docstring"""
UpperCAmelCase = range(1 , _snake_case )
UpperCAmelCase = range(1 , _snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
    # NOTE(review): the original formatted a call to undefined ``solution``;
    # the function defined in this module is ``_a``.
    print(f"""{_a(10, 22) = }""")
| 74 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
_UpperCamelCase = logging.get_logger(__name__)
# Map of pretrained YOLOS checkpoint names to their hosted config.json URLs.
# NOTE(review): this rebinds the same name as the logger above, so the logger
# is immediately shadowed and unusable — the two bindings should have
# distinct names.
_UpperCamelCase = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__(snake_case):
    """Configuration for a YOLOS object-detection model.

    Stores the ViT-style encoder hyperparameters plus the detection-specific
    settings: detection tokens, Hungarian-matcher costs, and loss
    coefficients.

    NOTE(review): the original ``__init__`` declared every parameter as ``A``
    — a SyntaxError (duplicate argument names). The parameters are renamed to
    match the attributes they initialize; order and default values are
    unchanged, so positional callers are unaffected.
    """

    SCREAMING_SNAKE_CASE = """yolos"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        image_size=[512, 864],  # kept as the original mutable default for compatibility
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowerCamelCase__(snake_case):
    """ONNX export configuration for YOLOS.

    NOTE(review): the original defined three properties all named
    ``_UpperCamelCase``, so the first two were silently shadowed by the last.
    They are renamed here to the OnnxConfig contract names implied by their
    return values — confirm against the base class before merging.
    """

    # Minimum torch version required for the ONNX export.
    SCREAMING_SNAKE_CASE = version.parse("""1.11""")

    @property
    def inputs(self):
        # Single pixel_values input; batch/channels/height/width are dynamic axes.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating exported outputs against torch.
        return 1e-4

    @property
    def default_onnx_opset(self):
        # Default ONNX opset version used for the export.
        return 12
| 74 |
"""simple docstring"""
from __future__ import annotations
def _a ( _snake_case ):
"""simple docstring"""
return len(set(_snake_case ) ) == len(_snake_case )
if __name__ == "__main__":
    # Run any doctest examples in this module when executed as a script.
    import doctest

    doctest.testmod()
| 74 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.