| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
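# Usage sketch (added for illustration; the schema below is an example, not from
# this file): the exported feature types are typically combined like this when
# declaring a dataset schema.
#
#   features = Features({
#       "text": Value("string"),                    # free-form string column
#       "label": ClassLabel(names=["neg", "pos"]),  # integer-encoded class
#       "tokens": Sequence(Value("int32")),         # variable-length int list
#   })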
| 714
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Returns (x, y) such that a*x + b*y == gcd(a, b).
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    # Returns the unique 0 <= n < n1*n2 with n % n1 == r1 and n % n2 == r2
    # (n1 and n2 must be coprime).
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    # Returns b such that (a * b) % n == 1.
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    # Same result as chinese_remainder_theorem, computed via modular inverses.
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
a__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
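# Hypothetical direct call, equivalent to the CLI entry point above (paths are
# placeholders, and config_path is accepted but unused by the function body):
#
#   convert_clap_checkpoint(
#       checkpoint_path="CLAP_weights.pt",
#       pytorch_dump_folder_path="./clap-converted",
#       config_path=None,
#       enable_fusion=False,
#   )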
| 715
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    r"""Configuration class to store the configuration of a UMT5 model."""
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self) -> int:
        return self.d_model
    @property
    def num_attention_heads(self) -> int:
        return self.num_heads
    @property
    def num_hidden_layers(self) -> int:
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13
    @property
    def atol_for_validation(self) -> float:
        return 5e-4
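# Minimal sketch of the property aliases above (overridden values are arbitrary
# examples, not defaults from the source):
#
#   config = UMT5Config(d_model=768, num_heads=12, num_layers=12)
#   assert config.hidden_size == 768 and config.num_attention_heads == 12
#   assert config.dense_act_fn == "gelu_new"  # the "gated-gelu" default maps here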
| 642
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
a__ : int = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer, based on SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
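# Usage sketch (hedged; fetching the pretrained vocab requires a download, so it
# is left as a comment). Per set_src_lang_special_tokens above, encoded source
# text comes out as [src_lang_code] ... [eos]:
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   ids = tok("Hello world")["input_ids"]
#   assert ids[0] == tok.lang_code_to_id["en_XX"] and ids[-1] == tok.eos_token_id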
| 716
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
# fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 642
| 0
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
for _ in range(A__ ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
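# Worked check (classic example): 8051 == 83 * 97, and `pollard_rho(8051)` with
# the default seed/step is expected to find one of those factors. The search is
# probabilistic, so None remains possible in principle; test divisibility:
#
#   factor = pollard_rho(8051)
#   assert factor is None or 8051 % factor == 0
#   assert pollard_rho(10) == 2  # even inputs short-circuit to 2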
| 717
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""
    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate(n: int, b: int) -> int:
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding(self) -> bytes:
        # Pad the message to a multiple of 64 bytes, appending the bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data
    def split_blocks(self):
        # Split the padded message into 64-byte blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]
    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
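# Extra sanity check mirroring test_sha1_hash above; the digest is compared
# against the standard library rather than hard-coded:
#
#   msg = b"The quick brown fox jumps over the lazy dog"
#   assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324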
| 642
| 0
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
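# Hypothetical direct call, equivalent to the CLI above; every path here is a
# placeholder, not a name from the source:
#
#   convert_tf_checkpoint_to_pytorch(
#       task="WTQ",
#       reset_position_index_per_cell=True,
#       tf_checkpoint_path="tapas_wtq/model.ckpt",
#       tapas_config_file="tapas_wtq/tapas_config.json",
#       pytorch_dump_path="./tapas-wtq-converted",
#   )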
| 718
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BART tokenizer, backed by HuggingFace's tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
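# Usage sketch (hedged; requires downloading the pretrained files). It shows the
# special-token layout produced by build_inputs_with_special_tokens above:
#
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   assert tok.build_inputs_with_special_tokens([10, 11]) == \
#       [tok.bos_token_id, 10, 11, tok.eos_token_id]  # <s> A </s>
#   # pairs become <s> A </s></s> B </s>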
| 642
| 0
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the LZW algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # Code length grew by one bit: pad every existing key with a leading "0".
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string to the file in byte-sized chunks."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Removes the leading prefix added by the compressor from the bit string."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result (name kept from the original module)."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
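# Tiny hand-traced example of decompress_data (added for illustration): with the
# seed lexicon {"0": "0", "1": "1"}, the bits "110" decode code "1" -> "1"; the
# power-of-two rebuild then pads existing keys to two bits, so code "01" -> "10",
# giving "110" overall:
#
#   assert decompress_data("110") == "110"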
| 719
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 642
| 0
|
def solution(n: int = 1000) -> int:
    # Closed form: for each a, the maximum remainder is 2 * a * ((a - 1) // 2).
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
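if __name__ == "__main__":
    # Brute-force cross-check on a small bound (added for illustration; assumes
    # this implements Project Euler 120, where the maximum of
    # (a - 1)**n + (a + 1)**n mod a**2 over n equals 2 * a * ((a - 1) // 2)):
    def _r_max(a: int) -> int:
        return max(((a - 1) ** n + (a + 1) ** n) % (a * a) for n in range(1, 2 * a + 1))
    assert solution(10) == sum(_r_max(a) for a in range(3, 11))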
| 720
|
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__(self) -> str:
        return str(self.k)
    def detect(self, img_path: str):
        """Returns the image with corners identified and the list of corners."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : str=False) -> str:
"""simple docstring"""
with torch.no_grad():
lowercase__ = MaskFormerModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , output_hidden_states=lowerCAmelCase)
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = MaskFormerForInstanceSegmentation(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
def comm_check_on_output(lowerCAmelCase : List[Any]):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
lowercase__ = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
comm_check_on_output(lowerCAmelCase)
lowercase__ = model(
pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase)
comm_check_on_output(lowerCAmelCase)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A : Any = False
A : Tuple = False
A : Union[str, Any] = False
A : List[str] = False
def UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
lowercase__ = MaskFormerModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
lowercase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase__ = MaskFormerModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (self.model_tester.min_size,) * 2
lowercase__ = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCAmelCase),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCAmelCase),
'class_labels': torch.zeros(2 , 10 , device=lowerCAmelCase).long(),
}
lowercase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(lowerCAmelCase)
lowercase__ = model(**lowerCAmelCase)
self.assertTrue(outputs.loss is not None)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase).to(lowerCAmelCase)
lowercase__ = model(**lowerCAmelCase , output_attentions=lowerCAmelCase)
self.assertTrue(outputs.attentions is not None)
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase__ = self.all_model_classes[1]
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.train()
lowercase__ = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase).loss
loss.backward()
def UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
lowercase__ = self.all_model_classes[1]
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs()
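# the two flags below enable hidden states and attentions in the outputs so their gradients can be retained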
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.train()
lowercase__ = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase)
lowercase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowercase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
a__ : Any = 1E-4
def _lowerCAmelCase ( ):
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
lowercase__ = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(lowerCAmelCase)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
lowercase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
lowercase__ = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(lowerCAmelCase)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
lowercase__ = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(lowerCAmelCase)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
lowercase__ = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(lowerCAmelCase)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(lowerCAmelCase)
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
lowercase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase__ = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowercase__ = torch.tensor(lowerCAmelCase).to(lowerCAmelCase)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
lowercase__ = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
def UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(lowerCAmelCase)
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
lowercase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase__ = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowercase__ = torch.tensor(lowerCAmelCase).to(lowerCAmelCase)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
lowercase__ = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
def UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(lowerCAmelCase)
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = image_processor(
[np.zeros((3, 8_00, 13_33)), np.zeros((3, 8_00, 13_33))] , segmentation_maps=[np.zeros((3_84, 3_84)).astype(np.floataa), np.zeros((3_84, 3_84)).astype(np.floataa)] , return_tensors='pt' , )
lowercase__ = inputs['pixel_values'].to(lowerCAmelCase)
lowercase__ = [el.to(lowerCAmelCase) for el in inputs['mask_labels']]
lowercase__ = [el.to(lowerCAmelCase) for el in inputs['class_labels']]
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
self.assertTrue(outputs.loss is not None)
| 721
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
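# the structure below is exposed lazily, depending on which backends (torch, tf, flax) are installed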
__UpperCAmelCase : Optional[Any] = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Dict = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Any = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 643
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
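# fall back to a default ResNet backbone config when none is given; a plain dict is converted to its config class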
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = backbone_config.get('''model_type''' )
UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
UpperCamelCase : Tuple = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Union[str, Any] = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self ):
"""simple docstring"""
return self.d_model
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase : List[Any] = self.backbone_config.to_dict()
UpperCamelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 1
|
from PIL import Image
def change_brightness ( img : Image , level : float ):
    """simple docstring"""
    def brightness(c : int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness )


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 643
|
import requests
from bs4 import BeautifulSoup


def stock_price ( symbol : str = "AAPL" ):
    """simple docstring"""
    url = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643
| 1
|
def a ( number : int ):
    """simple docstring"""
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 643
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args ( unknown_args ):
    """simple docstring"""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def main ( ):
    """simple docstring"""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()


if __name__ == "__main__":
    main()
| 643
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 1
|
from math import factorial
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__( self , real , rank ):
        """simple docstring"""
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__( self ):
        """simple docstring"""
        return (
            f"""{self.real}+"""
            f"""{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )

    def reduce ( self ):
        """simple docstring"""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )

    def __add__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )

    __radd__ = __add__

    def __sub__( self , other ):
        """simple docstring"""
        return self + other * -1

    def __mul__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )

    __rmul__ = __mul__

    def __truediv__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError

    def __floordiv__( self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError

    def __pow__( self , n ):
        """simple docstring"""
        if n < 0 or isinstance(n , float ):
            raise ValueError('''power must be a positive integer''' )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate ( func , position , order ):
    """simple docstring"""
    if not callable(func ):
        raise ValueError('''differentiate() requires a function as input for func''' )
    if not isinstance(position , (float, int) ):
        raise ValueError('''differentiate() requires a float as input for position''' )
    if not isinstance(order , int ):
        raise ValueError('''differentiate() requires an int as input for order''' )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f ( y ):
    """simple docstring"""
    return y**2 * y**4
print(differentiate(f, 9, 2))
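# f(y) = y**6, so the second derivative is 30 * y**4; the call above prints 196830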
| 643
|
def binary_or ( a : int , b : int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    # OR the zero-padded strings character by character, e.g. a=25, b=32 -> "0b111001"
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 643
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase : Tuple = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Any = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 643
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel ( k_size , sigma ):
    """simple docstring"""
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g


def gaussian_filter ( image , k_size , sigma ):
    """simple docstring"""
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uinta )
    return dst
if __name__ == "__main__":
# read original image
__UpperCAmelCase : Union[str, Any] = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
__UpperCAmelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussian3x3)
imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 643
| 1
|
import pytest
__UpperCAmelCase : Tuple = "__dummy_dataset1__"
__UpperCAmelCase : Tuple = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name ( ):
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code ( ):
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / '''datasets''' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"""{script_name}.py"""
    with open(script_path , '''w''' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
| 643
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a ( main_process_only : bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index == 0
    return _tqdm(*args , **kwargs , disable=disable )
| 643
| 1
|
from heapq import heappop, heappush
import numpy as np
def a ( grid : np.ndarray , source : tuple[int, int] , destination : tuple[int, int] , allow_diagonal : bool , ):
    """simple docstring"""
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
__UpperCAmelCase : Dict = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
__UpperCAmelCase : Dict = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
__UpperCAmelCase : str = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
__UpperCAmelCase : Optional[int] = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = ["input_ids"]
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = RESOURCE_FILES_NAMES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = do_lower_case
UpperCamelCase : Dict = sentencepiece_model_ckpt
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase : str = {v: k for k, v in self.vocab.items()}
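# reverse mapping (id -> token), used when converting ids back to tokens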
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if text is None:
return None
UpperCamelCase : str = self.tokenize(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : str = '''''', []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
UpperCamelCase : Optional[int] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = unicodedata.normalize('''NFKC''' , __SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = normalized_text, [], 0
if self.do_lower_case:
UpperCamelCase : Tuple = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCamelCase : Any = token[1:]
UpperCamelCase : Optional[int] = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
UpperCamelCase : List[Any] = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCamelCase : str = end
return token_mapping
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.__dict__.copy()
UpperCamelCase : str = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCamelCase : List[str] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
UpperCamelCase : Any = 0
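# split each sentencepiece further at boundaries between CJK characters, punctuation and digit runs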
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Any = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
    def is_ch_char ( self , char ):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha ( self , char ):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct ( self , char ):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace ( self , char ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab ( self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
| 643
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : Any = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
'''simple docstring'''
model_type = "camembert"
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
def inputs ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
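
# A tiny sketch of what the `inputs` property above yields for the default
# (non multiple-choice) task; the axis names are exactly those used in the
# property, nothing else is assumed.
from collections import OrderedDict as _OrderedDict

_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_inputs = _OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
assert list(_onnx_inputs) == ["input_ids", "attention_mask"]
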
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
'''simple docstring'''
def __init__( self , in_channels , out_channels , kernel_size , padding = 0 , bias = False , dilation = 1 , ):
"""simple docstring"""
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
self.batch_norm = nn.BatchNorm2d(out_channels )
self.activation = nn.ReLU()
def forward ( self , input ):
"""simple docstring"""
output = self.conv(input )
output = self.batch_norm(output )
output = self.activation(output )
return output
class UperNetPyramidPoolingBlock(nn.Module):
'''simple docstring'''
def __init__( self , pool_scale , in_channels , channels ):
"""simple docstring"""
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(pool_scale ),
UperNetConvModule(in_channels , channels , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(i ) , layer )
def forward ( self , input ):
"""simple docstring"""
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
'''simple docstring'''
def __init__( self , pool_scales , in_channels , channels , align_corners ):
"""simple docstring"""
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales ):
block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
self.blocks.append(block )
self.add_module(str(i ) , block )
def forward ( self , x ):
"""simple docstring"""
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x )
upsampled_ppm_out = nn.functional.interpolate(
ppm_out , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(upsampled_ppm_out )
return ppm_outs
class UperNetHead(nn.Module):
'''simple docstring'''
def __init__( self , config , in_channels ):
"""simple docstring"""
super().__init__()
self.config = config
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = in_channels
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
self.psp_modules = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
self.bottleneck = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(l_conv )
self.fpn_convs.append(fpn_conv )
self.fpn_bottleneck = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def init_weights ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _init_weights ( self , module ):
"""simple docstring"""
if isinstance(module , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def psp_forward ( self , inputs ):
"""simple docstring"""
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x ) )
psp_outs = torch.cat(psp_outs , dim=1 )
output = self.bottleneck(psp_outs )
return output
def forward ( self , encoder_hidden_states ):
"""simple docstring"""
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(encoder_hidden_states ) )
# build top-down path
used_backbone_levels = len(laterals )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
fpn_outs[i] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
fpn_outs = torch.cat(fpn_outs , dim=1 )
output = self.fpn_bottleneck(fpn_outs )
output = self.classifier(output )
return output
class UperNetFCNHead(nn.Module):
'''simple docstring'''
def __init__( self , config , in_index = 2 , kernel_size = 3 , dilation = 1 ):
"""simple docstring"""
super().__init__()
self.config = config
self.in_channels = config.auxiliary_in_channels
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
conv_padding = (kernel_size // 2) * dilation
convs = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
if self.num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs )
if self.concat_input:
self.conv_cat = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def init_weights ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _init_weights ( self , module ):
"""simple docstring"""
if isinstance(module , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def forward ( self , encoder_hidden_states ):
"""simple docstring"""
# just take the relevant feature map
hidden_states = encoder_hidden_states[self.in_index]
output = self.convs(hidden_states )
if self.concat_input:
output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
output = self.classifier(output )
return output
class UperNetPreTrainedModel(PreTrainedModel):
'''simple docstring'''
config_class = UperNetConfig
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights ( self , module ):
"""simple docstring"""
if isinstance(module , UperNetPreTrainedModel ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def init_weights ( self ):
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _set_gradient_checkpointing ( self , module , value=False ):
"""simple docstring"""
if isinstance(module , BackboneMixin ):
module.gradient_checkpointing = value
__UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def _lowercase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCamelCase : Tuple = self.backbone.forward_with_filtered_kwargs(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = outputs.feature_maps
UpperCamelCase : Union[str, Any] = self.decode_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = None
if self.auxiliary_head is not None:
UpperCamelCase : int = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCamelCase : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCamelCase : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCamelCase : Optional[Any] = (logits,) + outputs[1:]
else:
UpperCamelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
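
# Standalone sketch of the top-down FPN fusion performed in the decode head
# above: each coarser lateral is upsampled and added to the next finer one.
# Shapes are illustrative, not tied to any particular backbone.
import torch as _torch
import torch.nn.functional as _F

_laterals = [_torch.randn(1, 64, 32, 32), _torch.randn(1, 64, 16, 16), _torch.randn(1, 64, 8, 8)]
for _i in range(len(_laterals) - 1, 0, -1):
    _laterals[_i - 1] = _laterals[_i - 1] + _F.interpolate(
        _laterals[_i], size=_laterals[_i - 1].shape[2:], mode="bilinear", align_corners=False
    )
assert _laterals[0].shape == (1, 64, 32, 32)
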
from math import factorial
def combinations ( n : int , k : int ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
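
# Cross-check of the factorial formula above against math.comb (Python >= 3.8);
# the three cases mirror the prints in the __main__ block.
from math import comb as _comb, factorial as _factorial

for _n, _k in [(52, 5), (40, 4), (10, 3)]:
    assert _factorial(_n) // (_factorial(_k) * _factorial(_n - _k)) == _comb(_n, _k)
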
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map ( dataset : datasets.Dataset , **kwargs ):
"""simple docstring"""
_ = dataset.map(**kwargs )
@get_duration
def filter ( dataset : datasets.Dataset , **kwargs ):
"""simple docstring"""
_ = dataset.filter(**kwargs )
def benchmark_map_filter ( ):
"""simple docstring"""
times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
dataset = generate_example_dataset(
os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
def tokenize(examples ):
return tokenizer(examples['''text'''] )
times['''map identity'''] = map(dataset )
times['''map identity batched'''] = map(dataset , batched=True )
times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )
with dataset.formatted_as(type='''numpy''' ):
times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )
with dataset.formatted_as(type='''pandas''' ):
times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )
times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
times['''filter'''] = filter(dataset )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
#     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(RESULTS_FILE_PATH , '''wb''' ) as f:
f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
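
# `get_duration` above is imported from a local utils module that is not shown
# here; a plausible minimal implementation (an assumption, not the actual
# helper) would simply time the wrapped call and return the elapsed seconds:
import time as _time
from functools import wraps as _wraps

def _get_duration_sketch(func):
    @_wraps(func)
    def wrapper(*args, **kwargs):
        start = _time.time()
        func(*args, **kwargs)
        return _time.time() - start
    return wrapper
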
from random import shuffle
import tensorflow as tf
from numpy import array
def a ( vectors , noofclusters ):
"""simple docstring"""
noofclusters = int(noofclusters )
assert noofclusters < len(vectors )
# Find out the dimensionality
dim = len(vectors[0] )
# Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors ) ) )
shuffle(vector_indices )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
centroids = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder('''float64''' , [dim] )
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid , centroid_value ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
##These nodes will assign an assignment Variable the appropriate
##value
assignment_value = tf.placeholder('''int32''' )
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment , assignment_value ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
mean_input = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input , 0 )
##Node for computing Euclidean distances
# Placeholders for input
va = tf.placeholder('''float''' , [dim] )
vb = tf.placeholder('''float''' , [dim] )
euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
centroid_distances = tf.placeholder('''float''' , [noofclusters] )
cluster_assignment = tf.argmin(centroid_distances , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
init_op = tf.global_variables_initializer()
# Initialize all variables
sess.run(init_op )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
noofiterations = 100
for _ in range(noofiterations ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(vectors ) ):
vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
distances = [
sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
assignment = sess.run(
cluster_assignment , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters ):
# Collect all the vectors assigned to this cluster
assigned_vects = [
vectors[i]
for i in range(len(vectors ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
new_location = sess.run(
mean_op , feed_dict={mean_input: array(assigned_vects )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
centroids = sess.run(centroids )
assignments = sess.run(assignments )
return centroids, assignments
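
# The TF1 graph code above is plain Lloyd's algorithm with a fixed iteration
# count; an equivalent minimal NumPy sketch (illustrative, float input assumed):
import numpy as _np

def _kmeans_np(vectors, k, iterations=100, seed=0):
    rng = _np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # Expectation step: assign each vector to its nearest centroid
        dists = _np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization step: recompute each centroid as the mean of its cluster
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments
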
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation ( img : np.ndarray , pt1 : np.ndarray , pt2 : np.ndarray , rows : int , cols : int ) -> np.ndarray:
"""simple docstring"""
matrix = cva.getAffineTransform(pt1 , pt2 )
return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
image = cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
img_rows , img_cols = gray_img.shape
# set different points to rotate image
pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
images = [
gray_img,
get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
]
# plot different image rotations
fig = plt.figure(1)
titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
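
# cva.getAffineTransform solves for the 2x3 matrix M with M @ [x, y, 1]^T = [x', y']^T
# from three non-collinear point pairs; the same matrix can be recovered with a
# small linear solve (self-contained sketch, points reused from above):
import numpy as _np2

_src = _np2.array([[50, 50], [200, 50], [50, 200]], _np2.float32)
_dst = _np2.array([[10, 100], [200, 50], [100, 250]], _np2.float32)
_A = _np2.hstack([_src, _np2.ones((3, 1), _np2.float32)])  # rows are [x, y, 1]
_M = _np2.linalg.solve(_A, _dst).T                         # 2x3 affine matrix
assert _np2.allclose(_A @ _M.T, _dst, atol=1e-3)
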
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
'''simple docstring'''
def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super().__init__()
self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
self.softmax = torch.nn.Softmax(dim=1 )
def BERT ( self , **inputs ):
"""simple docstring"""
return self.bert(**inputs ).last_hidden_state
def VectorSum ( self , token_embeddings ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=True )
def Atten ( self , q_rep , S_rep , T=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(q_rep , S_rep ) )
def forward ( self , W_query , W_supports ):
"""simple docstring"""
support_sizes = W_supports['''sizes'''].tolist()
start_token_id = W_supports['''start_token_id'''].item()
end_token_id = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
q = self.BERT(**W_query )
S = self.BERT(**W_supports )
p_starts = None
p_ends = None
start_token_masks = W_supports['''input_ids'''] == start_token_id
end_token_masks = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(support_sizes ):
if i == 0:
s = 0
else:
s = support_sizes[i - 1]
s_start = S[s : s + size][start_token_masks[s : s + size]]
s_end = S[s : s + size][end_token_masks[s : s + size]]
p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
p_starts = torch.vstack((p_starts, p_start) )
p_ends = torch.vstack((p_ends, p_end) )
else:
p_starts = p_start
p_ends = p_end
return p_starts, p_ends
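
# The scoring step above is a dot-product attention over support tokens: query
# token embeddings against support start/end token embeddings, summed over the
# support axis and softmax-normalized. Standalone sketch, shapes illustrative:
import torch as _torch2

_q = _torch2.randn(20, 8)       # query token embeddings (tokens x dim)
_s_start = _torch2.randn(5, 8)  # support [START] token embeddings
_p_start = _torch2.matmul(_q, _s_start.T).sum(1).softmax(0)
assert _p_start.shape == (20,) and _torch2.isclose(_p_start.sum(), _torch2.tensor(1.0))
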
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.act_dim = act_dim
self.state_dim = state_dim
self.hidden_size = hidden_size
self.max_length = max_length
self.is_training = is_training
def _lowercase ( self ):
"""simple docstring"""
states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _lowercase ( self ):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _lowercase ( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
"""simple docstring"""
model = DecisionTransformerModel(config=config )
model.to(torch_device )
model.eval()
result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def _lowercase ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
) = config_and_inputs
inputs_dict = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
all_generative_model_classes = ()
pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__UpperCamelCase : Tuple = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__UpperCamelCase : List[str] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : str = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : str = False
__UpperCamelCase : Any = False
__UpperCamelCase : Dict = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Any = False
def _lowercase ( self ):
"""simple docstring"""
self.model_tester = DecisionTransformerModelTester(self )
self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[int] = DecisionTransformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[str] = [*signature.parameters.keys()]
UpperCamelCase : int = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(__SCREAMING_SNAKE_CASE )] , __SCREAMING_SNAKE_CASE )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase : Dict = 10 # defined by the RL environment, may be normalized
UpperCamelCase : List[Any] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
UpperCamelCase : Dict = model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = model.config
torch.manual_seed(0 )
UpperCamelCase : List[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa ) # env.reset()
UpperCamelCase : Dict = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCamelCase : Optional[Any] = state
UpperCamelCase : List[str] = torch.zeros(1 , 0 , config.act_dim , device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
UpperCamelCase : Optional[Any] = torch.zeros(1 , 0 , device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
UpperCamelCase : List[str] = torch.tensor(0 , device=__SCREAMING_SNAKE_CASE , dtype=torch.long ).reshape(1 , 1 )
for step in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCamelCase : Tuple = torch.cat([rewards, torch.zeros(1 , 1 , device=__SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCamelCase : int = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = model(
states=__SCREAMING_SNAKE_CASE , actions=__SCREAMING_SNAKE_CASE , rewards=__SCREAMING_SNAKE_CASE , returns_to_go=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__SCREAMING_SNAKE_CASE , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCamelCase : str = action_pred[0, -1]
UpperCamelCase : str = torch.cat([states, state] , dim=1 )
UpperCamelCase : Any = returns_to_go[0, -1] - reward
UpperCamelCase : Optional[int] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCamelCase : Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=__SCREAMING_SNAKE_CASE , dtype=torch.long ) * (step + 1)] , dim=1 )
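
# The autoregressive rollout above maintains "returns to go": after each
# environment step the realized reward is subtracted from the previous return
# target. Plain-Python sketch of that bookkeeping:
_rtg = [10.0]
for _reward in [1.0, 1.0]:
    _rtg.append(_rtg[-1] - _reward)
assert _rtg == [10.0, 9.0, 8.0]
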
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
tokenizer_class = DebertaTokenizer
test_rust_tokenizer = True
rust_tokenizer_class = DebertaTokenizerFast
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab_tokens = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(merges ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
tokenizer = self.get_tokenizer()
text = '''lower newer'''
bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _lowercase ( self ):
"""simple docstring"""
tokenizer = self.get_tokenizer()
tokd = tokenizer('''Hello''' , '''World''' )
expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
def _lowercase ( self ):
"""simple docstring"""
tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
encoded_text_from_decode = tokenizer.encode(
'''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
encoded_pair_from_decode = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase : int = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
UpperCamelCase : Optional[int] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
UpperCamelCase : Optional[int] = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
UpperCamelCase : List[Any] = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
UpperCamelCase : Optional[Any] = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
UpperCamelCase : List[str] = text_classifier('''This is great !''' , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
UpperCamelCase : int = text_classifier('''This is great !''' , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
UpperCamelCase : str = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
UpperCamelCase : List[str] = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def _lowercase ( self ):
"""simple docstring"""
import torch
UpperCamelCase : Optional[int] = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
UpperCamelCase : List[Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
UpperCamelCase : Optional[int] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = pipeline('''text-classification''' )
UpperCamelCase : Tuple = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCamelCase : int = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCamelCase : Tuple = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = pipeline('''text-classification''' , framework='''tf''' )
UpperCamelCase : Union[str, Any] = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCamelCase : Optional[int] = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCamelCase : int = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = TextClassificationPipeline(model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCamelCase : Tuple = '''HuggingFace is in'''
UpperCamelCase : Tuple = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
UpperCamelCase : int = ['''HuggingFace is in ''', '''Paris is in France''']
UpperCamelCase : Any = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}, {'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCamelCase : int = text_classifier(__SCREAMING_SNAKE_CASE , top_k=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [[{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] * N, [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] * N] , )
UpperCamelCase : Tuple = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
UpperCamelCase : Union[str, Any] = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
UpperCamelCase : str = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
text_classifier(__SCREAMING_SNAKE_CASE )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCamelCase : str = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''label''': ANY(__SCREAMING_SNAKE_CASE ), '''score''': ANY(__SCREAMING_SNAKE_CASE )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _no_encoding_on_file_open ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
input_content = input_file.read()
match = regexp.search(input_content )
return match
def _no_print_statements ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
input_content = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
matches = regexp.finditer(input_content )
matches = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def test_no_encoding_on_file_open ( self ):
"""simple docstring"""
dataset_paths = Path('''./datasets''' )
dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(dataset ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def test_no_print_statements ( self ):
"""simple docstring"""
dataset_paths = Path('''./datasets''' )
dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(dataset ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
__UpperCamelCase : Any = XGLMConfig
__UpperCamelCase : Dict = {}
__UpperCamelCase : List[str] = "gelu"
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
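        # keep the sampled ids in a small fixed range; the exact token values do not matter for these tests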
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
    def _lowercase ( self , verify_outputs=True ):
"""simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
        tokenized = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        input_ids = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
def _lowercase ( self ):
"""simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        tokenizer.padding_side = '''left'''
# use different length sentences to test batching
        sentences = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
        inputs = tokenizer(sentences , return_tensors='''tf''' , padding=True )
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs ( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
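# e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}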
class UpperCAmelCase_ ( PreTrainedTokenizer):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ):
        """simple docstring"""
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.encoder )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
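        # pre-tokenization cleanup: pad punctuation and apostrophes with spaces,
        # collapse repeated whitespace, and spell out newlines as __newln__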
        token = re.sub('''([.,!?()])''' , R''' \1''' , token )
        token = re.sub('''(\')''' , R''' \1 ''' , token )
        token = re.sub(R'''\s{2,}''' , ''' ''' , token )
        if "\n" in token:
            token = token.replace('''\n''' , ''' __newln__''' )
        tokens = token.split(''' ''' )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = '''@@ '''.join(word )
            word = word[:-4]
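            # "@@ " now marks non-final subword pieces; the 4-character "</w>" end-of-word marker has been stripped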
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , __SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) )
return split_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = token.lower()
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        out_string = ''' '''.join(__SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip()
return out_string
    def _lowercase ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang ):
        """simple docstring"""
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def _lowercase ( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _lowercase ( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _lowercase ( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _lowercase ( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
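        # mBART source format: no prefix token; the language code follows </s> at the end of the sequence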
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def _lowercase ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param ( t5x_checkpoint_path ):
    """simple docstring"""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params ( flax_dict ):
    """simple docstring"""
    converted_dict = {}
    CONVERSION_MAPPING = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    DECODER_CONVERSION_MAPPING = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '''.'''.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , new_key )
                new_key = new_key.replace('''encoder''' , '''encoder.encoder''' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf ( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    """simple docstring"""
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
        decoder_config = Pix2StructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 4_0_9_6
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('''Model saved in {}'''.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA (visual question answering) model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric(\"code_eval\")\n    >>> test_cases = [\"assert add(2,3)==5\"]\n    >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {'pass@1': 0.5, 'pass@2': 1.0}\n"
_WARNING = "\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('''This metric is currently not supported on Windows.''' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '''\n''' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['''completion_id'''], result) )
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['''passed'''] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {f"""pass@{k}""": estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k ( num_samples , num_correct , k ):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int , c: int , k: int ) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
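# sanity check against the docstring example above: with n=2 candidates and c=1 correct,
# pass@1 = 1 - C(1,1)/C(2,1) = 0.5 and pass@2 = 1.0 (since n - c < k), matching {'pass@1': 0.5, 'pass@2': 1.0}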
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class UpperCAmelCase_ ( SequenceFeatureExtractor):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
    def __init__( self , feature_size=80 , sampling_rate=16_000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
    def _np_extract_fbank_features( self , waveform ):
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
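        # Whisper-style normalization follows: clamp to 8 (log10 units) below the peak, then shift and scale into roughly [-1, 1]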
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batch_encoding = BatchFeature({'''input_features''': raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batch_encoding , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['''input_features'''] = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            padded_inputs['''input_features'''] = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['''input_features'''] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['''attention_mask'''] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key ( key ):
"""simple docstring"""
    if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 1_0:
        key = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
    elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 1_0:
        key = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
    elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 1_0:
        key = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
    elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 1_0:
        key = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
    if "prime_prior" in key:
        key = key.replace('''prime_prior''' , '''encoder''' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''' , '''.''' )
    if key.endswith('''k''' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''' , '''.codebook''' )
    if "y_emb." in key:
        return key.replace('''y_emb.''' , '''metadata_embedding.''' )
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def fix_jukebox_keys ( state_dict , model_state_dict , key_prefix , mapping ):
"""simple docstring"""
    new_dict = {}
import re
    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
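            # the checkpoint stores two numbered sub-modules per block, so the (model index, sub-layer) pair flattens to index * 2 + sub-layer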
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
else:
UpperCamelCase : List[Any] = original_key
UpperCamelCase : List[str] = replace_key(SCREAMING_SNAKE_CASE_ )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shapes
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
UpperCamelCase : Optional[int] = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
UpperCamelCase : List[Any] = original_key
UpperCamelCase : str = original_key
UpperCamelCase : List[str] = value
return new_dict
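# Illustration of the renaming above (a sketch; the concrete checkpoint key layout is
# an assumption): the arithmetic int(groups[2]) * 2 + int(groups[3]) flattens a nested
# (block, layer) index pair from the original checkpoint into the single
# downsample_block / upsample_block index used here, and the {'1': 1, '3': 2} lookup
# maps the two conv positions inside a resnet block onto conv1d_1 / conv1d_2.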
@torch.no_grad()
def a ( SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Dict=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
UpperCamelCase : Tuple = requests.get(F"""{PREFIX}{file}""" , allow_redirects=SCREAMING_SNAKE_CASE_ )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=SCREAMING_SNAKE_CASE_ )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , '''wb''' ).write(r.content )
UpperCamelCase : int = MODEL_MAPPING[model_name.split('''/''' )[-1]]
UpperCamelCase : Optional[Any] = JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = JukeboxModel(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = []
UpperCamelCase : Optional[Any] = {}
for i, dict_name in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['''model''']
UpperCamelCase : int = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
UpperCamelCase : List[Any] = old_dic[k]
elif k.endswith('''.w''' ):
UpperCamelCase : Any = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
UpperCamelCase : Optional[int] = old_dic[k]
else:
UpperCamelCase : List[str] = old_dic[k]
UpperCamelCase : Union[str, Any] = '''vqvae''' if i == 0 else F"""priors.{3 - i}"""
UpperCamelCase : Optional[Any] = fix_jukebox_keys(SCREAMING_SNAKE_CASE_ , model.state_dict() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
weight_dict.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = weight_dict.pop(0 )
model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE_ )
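# The checkpoint lists the priors in the opposite order from model.priors, hence
# the reversed weight_dict[2 - i] indexing below (this ordering is inferred from
# the 2 - i / 3 - i indexing in this script, not from upstream documentation).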
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
return weight_dict
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__UpperCAmelCase : Tuple = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Dict = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__UpperCAmelCase : Tuple = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__UpperCAmelCase : Any = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents
):
UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Optional[int] = do_lower_case
UpperCamelCase : Optional[Any] = strip_accents
UpperCamelCase : List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.__dict__.copy()
UpperCamelCase : Any = BertPreTokenizer()
return state
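# __getstate__ above swaps in a stock BertPreTokenizer before serialization,
# apparently because PreTokenizer.custom(...) wraps a Python object that cannot
# be pickled; __setstate__ below rebuilds the JiebaPreTokenizer from the
# restored vocab.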
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Any = BertPreTokenizer()
return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
from random import randint, random
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : int = 5 , ):
"""simple docstring"""
UpperCamelCase : List[Any] = [[-1] * number_of_cells] # Create a highway without any car
UpperCamelCase : List[Any] = 0
UpperCamelCase : Optional[int] = max(SCREAMING_SNAKE_CASE_ , 0 )
while i < number_of_cells:
UpperCamelCase : str = (
randint(0 , SCREAMING_SNAKE_CASE_ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Dict = 0
UpperCamelCase : Union[str, Any] = highway_now[car_index + 1 :]
for cell in range(len(SCREAMING_SNAKE_CASE_ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(SCREAMING_SNAKE_CASE_ , -1 )
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ )
# Before calculations, the highway is empty
UpperCamelCase : Optional[Any] = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE_ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
UpperCamelCase : Dict = min(highway_now[car_index] + 1 , SCREAMING_SNAKE_CASE_ )
# Number of empty cells before the next car
UpperCamelCase : str = get_distance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - 1
# We can't have the car causing an accident
UpperCamelCase : int = min(next_highway[car_index] , SCREAMING_SNAKE_CASE_ )
if random() < probability:
# Randomly, a driver will slow down
UpperCamelCase : Union[str, Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
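# Worked example of one update step (a sketch, not part of the original file):
# with max_speed=5 and probability=0, a car at speed 2 with three empty cells
# ahead first accelerates to min(2 + 1, 5) = 3, is then capped by the gap to
# get_distance(...) - 1 = 2, and ends the step at speed 2.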
def a ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Any = len(highway[0] )
for i in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = update(highway[i] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = [-1] * number_of_cells
for car_index in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
UpperCamelCase : int = (car_index + speed) % number_of_cells
# Commit the change of position
UpperCamelCase : Union[str, Any] = speed
highway.append(SCREAMING_SNAKE_CASE_ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return array
UpperCamelCase , UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
# Compute the variables
UpperCamelCase : Union[str, Any] = _max - _min + 1
UpperCamelCase , UpperCamelCase : Optional[Any] = [0] * holes_range, [0] * holes_range
# Do the sorting.
for i in array:
UpperCamelCase : Optional[int] = i - _min
UpperCamelCase : Any = i
holes_repeat[index] += 1
# Rebuild the array by putting the numbers back in order.
UpperCamelCase : str = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
while holes_repeat[i] > 0:
UpperCamelCase : List[Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
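# Worked example (a sketch): for [8, 3, 2, 7], _min = 2 and _max = 8, so
# holes_range = 7; each value i is bucketed at index i - _min and the array is
# rebuilt in order, yielding [2, 3, 7, 8]. Runtime is O(n + range), which is
# only attractive when the value range is close to the number of elements.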
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
__UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__UpperCAmelCase : Tuple = TypeVar("T")
__UpperCAmelCase : Dict = TypeVar("U")
class UpperCAmelCase_ ( Generic[T, U]):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = key
UpperCamelCase : Dict = val
UpperCamelCase : DoubleLinkedListNode[T, U] | None = None
UpperCamelCase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
"""simple docstring"""
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class UpperCAmelCase_ ( Generic[T, U]):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
UpperCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : Optional[Any] = self.rear, self.head
def __repr__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''DoubleLinkedList''']
UpperCamelCase : Any = self.head
while node.next is not None:
rep.append(str(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Union[str, Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
UpperCamelCase : int = node
UpperCamelCase : Any = previous
UpperCamelCase : int = node
UpperCamelCase : Tuple = self.rear
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
UpperCamelCase : Any = node.next
UpperCamelCase : int = node.prev
UpperCamelCase : int = None
UpperCamelCase : str = None
return node
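# remove() returns None for nodes that are already detached or are the
# head/rear sentinels (their prev or next is None), which is why callers
# below assert that the returned node is not None.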
class UpperCAmelCase_ ( Generic[T, U]):
'''simple docstring'''
__UpperCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : DoubleLinkedList[T, U] = DoubleLinkedList()
UpperCamelCase : Any = capacity
UpperCamelCase : Dict = 0
UpperCamelCase : List[str] = 0
UpperCamelCase : Union[str, Any] = 0
UpperCamelCase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
"""simple docstring"""
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return key in self.cache
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if key in self.cache:
self.hits += 1
UpperCamelCase : DoubleLinkedListNode[T, U] = self.cache[key]
UpperCamelCase : Dict = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__SCREAMING_SNAKE_CASE )
return node.val
self.miss += 1
return None
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
UpperCamelCase : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__SCREAMING_SNAKE_CASE ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
UpperCamelCase : Union[str, Any] = DoubleLinkedListNode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
UpperCamelCase : Optional[int] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
UpperCamelCase : Optional[int] = value
self.list.add(__SCREAMING_SNAKE_CASE )
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE = 128 ):
"""simple docstring"""
def cache_decorator_inner(__SCREAMING_SNAKE_CASE ) -> Callable[..., U]:
def cache_decorator_wrapper(*__SCREAMING_SNAKE_CASE ) -> U:
if func not in cls.decorator_function_to_instance_map:
UpperCamelCase : Any = LRUCache(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
UpperCamelCase : Optional[Any] = func(*__SCREAMING_SNAKE_CASE )
cls.decorator_function_to_instance_map[func].put(args[0] , __SCREAMING_SNAKE_CASE )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__SCREAMING_SNAKE_CASE , '''cache_info''' , __SCREAMING_SNAKE_CASE ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
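# Note: the decorator above keys the cache on args[0] only, so it effectively
# assumes the wrapped function takes a single positional, hashable argument;
# keyword arguments and extra positionals are ignored by the cache lookup.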
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase : Dict = (
Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = configuration['''lowercase_modelname''']
UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
UpperCamelCase : Any = f.readlines()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Create temp file
UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp()
UpperCamelCase : Tuple = False
with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file:
with open(__SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(__SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
UpperCamelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(__SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Remove original file
remove(__SCREAMING_SNAKE_CASE )
# Move new file
move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE ) as datafile:
UpperCamelCase : int = []
UpperCamelCase : Dict = False
UpperCamelCase : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(__SCREAMING_SNAKE_CASE )
remove(__SCREAMING_SNAKE_CASE )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(__SCREAMING_SNAKE_CASE )
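# The template files parsed by replace_in_files above use marker comments of the
# form '# To replace in: "<target file>"', '# Below: "<anchor line>"', a block of
# lines to copy, and '# End.' to terminate the block; '# Replace with' resets the
# pending lines. This description is inferred from the parser itself rather than
# from separate template documentation.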
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__UpperCAmelCase : Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__UpperCAmelCase : Tuple = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__UpperCAmelCase : Optional[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 4 , ):
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__SCREAMING_SNAKE_CASE , hypotheses=__SCREAMING_SNAKE_CASE , min_len=__SCREAMING_SNAKE_CASE , max_len=__SCREAMING_SNAKE_CASE )
}
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : str = cva.getAffineTransform(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cva.warpAffine(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (rows, cols) )
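# getAffineTransform solves for the unique 2x3 affine matrix that maps the three
# source points onto the three destination points, and warpAffine applies that
# matrix to the whole image; moving the three control points is what produces
# the different "rotations" below.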
if __name__ == "__main__":
# read original image
__UpperCAmelCase : Tuple = cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
__UpperCAmelCase : int = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__UpperCAmelCase , __UpperCAmelCase : Tuple = gray_img.shape
# set different points to rotate image
__UpperCAmelCase : Optional[int] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__UpperCAmelCase : Optional[int] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__UpperCAmelCase : Any = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__UpperCAmelCase : int = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__UpperCAmelCase : Union[str, Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__UpperCAmelCase : List[str] = plt.figure(1)
__UpperCAmelCase : Dict = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__UpperCAmelCase : str = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def a ( SCREAMING_SNAKE_CASE_ : str = "dhaka" , SCREAMING_SNAKE_CASE_ : int = 5 ):
"""simple docstring"""
UpperCamelCase : List[str] = min(SCREAMING_SNAKE_CASE_ , 5_0 ) # Prevent abuse!
UpperCamelCase : str = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
UpperCamelCase : int = requests.get('''https://www.google.com/search''' , params=SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = BeautifulSoup(html.text , '''html.parser''' )
UpperCamelCase : Union[str, Any] = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
UpperCamelCase : int = json.dumps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = json.loads(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , SCREAMING_SNAKE_CASE_ , )
if not matched_google_image_data:
return 0
UpperCamelCase : Dict = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(SCREAMING_SNAKE_CASE_ ) , )
UpperCamelCase : str = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , SCREAMING_SNAKE_CASE_ , )
for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE_ ):
if index >= max_images:
return index
UpperCamelCase : Dict = bytes(SCREAMING_SNAKE_CASE_ , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCamelCase : Optional[Any] = bytes(SCREAMING_SNAKE_CASE_ , '''ascii''' ).decode(
'''unicode-escape''' )
UpperCamelCase : Tuple = urllib.request.build_opener()
UpperCamelCase : Optional[Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
urllib.request.urlretrieve( # noqa: S310
SCREAMING_SNAKE_CASE_ , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__UpperCAmelCase : str = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = backbone_config.get('''model_type''' )
UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
UpperCamelCase : Tuple = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Union[str, Any] = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self ):
"""simple docstring"""
return self.d_model
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase : List[Any] = self.backbone_config.to_dict()
UpperCamelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
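# The inputs mapping above names the dynamic axes by position: axis 0 of both
# tensors is the batch dimension, and axes 1-3 of pixel_values are channels,
# height, and width; axes not listed are exported with fixed sizes.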
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase : List[str] = k_size // 2
UpperCamelCase , UpperCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
UpperCamelCase : Dict = 1 / (2 * pi * sigma) * exp(-(square(SCREAMING_SNAKE_CASE_ ) + square(SCREAMING_SNAKE_CASE_ )) / (2 * square(SCREAMING_SNAKE_CASE_ )) )
return g
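# The kernel above is a k_size x k_size grid of Gaussian weights centered on the
# middle cell; note it is not explicitly re-normalized to sum to 1, so the
# filtered image below can come out slightly dimmer for small kernels.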
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Tuple = image.shape[0], image.shape[1]
# dst image height and width
UpperCamelCase : str = height - k_size + 1
UpperCamelCase : Optional[int] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
UpperCamelCase : List[Any] = zeros((dst_height * dst_width, k_size * k_size) )
UpperCamelCase : Tuple = 0
for i, j in product(range(SCREAMING_SNAKE_CASE_ ) , range(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
UpperCamelCase : Dict = window
row += 1
# turn the kernel into shape(k*k, 1)
UpperCamelCase : Optional[int] = gen_gaussian_kernel(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = ravel(SCREAMING_SNAKE_CASE_ )
# reshape and get the dst image
UpperCamelCase : Optional[int] = dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
return dst
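# The im2col trick above flattens every k_size x k_size window into one row of
# image_array, so the whole filtering step reduces to a single matrix-vector
# product dot(image_array, gaussian_kernel) followed by a reshape; the output is
# (height - k_size + 1) x (width - k_size + 1), i.e. a "valid" convolution.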
if __name__ == "__main__":
# read original image
__UpperCAmelCase : Union[str, Any] = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
__UpperCAmelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__UpperCAmelCase : Optional[int] = gaussian_filter(gray, 3, sigma=1)
__UpperCAmelCase : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
import requests
from bsa import BeautifulSoup
def a ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ):
"""simple docstring"""
UpperCamelCase : Dict = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
UpperCamelCase : Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' )
UpperCamelCase : Dict = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__UpperCAmelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : str = field(
default=_a, metadata={"help": "Model type selected in the list: " + ", ".join(_a)})
__UpperCamelCase : str = field(
default=_a, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
__UpperCamelCase : int = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
__UpperCamelCase : int = field(
default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, )
__UpperCamelCase : int = field(
default=64, metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
}, )
__UpperCamelCase : int = field(
default=30, metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
}, )
__UpperCamelCase : bool = field(
default=_a, metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCamelCase : bool = field(
default=_a, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
__UpperCamelCase : float = field(
default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
__UpperCamelCase : int = field(
default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
__UpperCamelCase : int = field(
default=0, metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
}, )
__UpperCamelCase : int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "train"
__UpperCamelCase : Union[str, Any] = "dev"
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : SquadDataTrainingArguments
__UpperCamelCase : List[SquadFeatures]
__UpperCamelCase : Split
__UpperCamelCase : bool
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = Split.train , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pt" , ):
"""simple docstring"""
UpperCamelCase : Optional[int] = args
UpperCamelCase : List[str] = is_language_sensitive
UpperCamelCase : str = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
UpperCamelCase : str = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
UpperCamelCase : int = mode
# Load data features from cache or dataset file
UpperCamelCase : List[str] = '''v2''' if args.version_2_with_negative else '''v1'''
UpperCamelCase : Any = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase : Tuple = cached_features_file + '''.lock'''
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
UpperCamelCase : Union[str, Any] = time.time()
UpperCamelCase : Optional[int] = torch.load(__SCREAMING_SNAKE_CASE )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
UpperCamelCase : Tuple = self.old_features['''features''']
UpperCamelCase : List[str] = self.old_features.get('''dataset''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = self.old_features.get('''examples''' , __SCREAMING_SNAKE_CASE )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
''' future run''' )
else:
if mode == Split.dev:
UpperCamelCase : Optional[int] = self.processor.get_dev_examples(args.data_dir )
else:
UpperCamelCase : int = self.processor.get_train_examples(args.data_dir )
UpperCamelCase , UpperCamelCase : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__SCREAMING_SNAKE_CASE , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Union[str, Any] = time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , __SCREAMING_SNAKE_CASE , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = self.features[i]
UpperCamelCase : Optional[Any] = torch.tensor(feature.input_ids , dtype=torch.long )
UpperCamelCase : int = torch.tensor(feature.attention_mask , dtype=torch.long )
UpperCamelCase : Any = torch.tensor(feature.token_type_ids , dtype=torch.long )
UpperCamelCase : str = torch.tensor(feature.cls_index , dtype=torch.long )
UpperCamelCase : Tuple = torch.tensor(feature.p_mask , dtype=torch.float )
UpperCamelCase : int = torch.tensor(feature.is_impossible , dtype=torch.float )
UpperCamelCase : List[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
UpperCamelCase : str = torch.tensor(feature.start_position , dtype=torch.long )
UpperCamelCase : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
def twos_complement(number: int) -> str:
    """
    Take in a negative integer and return its two's complement representation
    as a binary string.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
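    # Hedged quick demo (value follows from the doctests above):
    print(twos_complement(-17))  # 0b101111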
| 643
| 1
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """simple docstring"""
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        """simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1_384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 643
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
| 643
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 643
|
def binary_or(a: int, b: int) -> str:
    """
    Take in 2 integers, return a binary string of their bitwise OR.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
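    # Hedged extra check: 21 = 0b10101 and 30 = 0b11110, so their OR is 0b11111.
    assert binary_or(21, 30) == "0b11111"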
| 643
| 1
|
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    '''simple docstring'''

    def test_sorted(self):
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """simple docstring"""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
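    # Hedged note: invoking this module directly runs the unittest runner; pytest
    # will likewise discover the test_* methods on TestGreedyKnapsack above.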
| 643
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """simple docstring"""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """simple docstring"""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
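    # Hedged sizing note (illustrative): the filter uses "valid" windows only, so a
    # k x k mask shrinks an (h, w) image to (h - k + 1, w - k + 1); e.g. a 512x512
    # input with the 3x3 mask yields a 510x510 result.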
| 643
| 1
|
def solution(n: int = 1_000) -> int:
    """
    Return the product a * b * c of the Pythagorean triplet (a, b, c) with
    a + b + c == n, or -1 if no such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'''{solution() = }''')
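    # Hedged extra check: for n = 12 the only Pythagorean triplet is (3, 4, 5),
    # giving a product of 60.
    assert solution(12) == 60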
| 643
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
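
# Hedged usage sketch (illustrative; assumes the usual `accelerate.utils` export):
#
#   from accelerate.utils import tqdm
#
#   for batch in tqdm(True, dataloader):  # progress bar rendered only on local rank 0
#       ...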
| 643
| 1
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    '''simple docstring'''

    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
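
# Hedged invocation sketch (script name and checkpoint names are illustrative):
#
#   python create_flax_vision_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2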
| 643
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        """simple docstring"""
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """simple docstring"""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 643
| 1
|
def solution(n: int = 4_000_000) -> int:
    """
    Return the sum of all even Fibonacci numbers that do not exceed n.
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
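    # Hedged extra check: the even Fibonacci numbers not exceeding 10 are 0, 2
    # and 8, so their sum is 10.
    assert solution(10) == 10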
| 643
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    '''simple docstring'''

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        """simple docstring"""
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        """simple docstring"""
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    '''simple docstring'''

    def __init__(self, pool_scale, in_channels, channels):
        """simple docstring"""
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        """simple docstring"""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    '''simple docstring'''

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        """simple docstring"""
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        """simple docstring"""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    '''simple docstring'''

    def __init__(self, config, in_channels):
        """simple docstring"""
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        """simple docstring"""
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states):
        """simple docstring"""
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    '''simple docstring'''

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        """simple docstring"""
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        """simple docstring"""
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        """simple docstring"""
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    '''simple docstring'''

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """simple docstring"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        """simple docstring"""
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
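
# Hedged usage sketch (assumes the restored transformers names used above):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   # outputs.logits has shape (batch_size, num_labels, height, width)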
| 643
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)[\"depth\"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline(\"depth-estimation\")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to(\"cuda\")\n\n\n    >>> img = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/cat.png\"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n    >>> prompt = \"A robot, 4k photo\"\n    >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n    >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save(\"robot_cat.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
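
# Hedged sanity note, derived from the function above with the default scale_factor
# of 8: 768 // 64 = 12 with no remainder, so downscale_height_and_width(768, 768)
# returns (96, 96), the latent resolution used for a 768x768 output image.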
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        """simple docstring"""
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 643
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """simple docstring"""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """simple docstring"""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """simple docstring"""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})

        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
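
# Hedged note (illustrative): running this file writes the timing dict as JSON to
# RESULTS_FILE_PATH, i.e. into a "results" directory next to this script.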
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 643
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        """simple docstring"""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowercase ( self ):
"""simple docstring"""
import torch
UpperCamelCase : Optional[int] = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
UpperCamelCase : Union[str, Any] = tokenizer(**self.metas )['''input_ids''']
# fmt: off
UpperCamelCase : List[Any] = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among num_picked balls drawn from the urn."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
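# By linearity of expectation, solution() sums over the NUM_COLOURS colours the
# probability that at least one ball of that colour appears in the draw. A quick
# Monte Carlo sanity check of the closed form (the trial count is an arbitrary
# choice); both should land near 6.8187:

import random


def simulate(num_picked: int = 20, trials: int = 100_000) -> float:
    """Empirical estimate of the expected number of distinct colours drawn."""
    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(urn, num_picked)))
    return seen / trials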
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = W_supports['''sizes'''].tolist()
UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item()
UpperCamelCase : Any = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id
UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase : Optional[int] = 0
else:
UpperCamelCase : Tuple = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : str = p_start
UpperCamelCase : Optional[int] = p_end
return p_starts, p_ends
class Graph:
    """Directed graph stored as an adjacency list, with depth-first search."""

    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        """Print each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from from_vertex to to_vertex."""
        # check if the source vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Run a depth-first traversal covering every vertex."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        """Visit start_vertex, then recurse into the remaining unvisited vertices."""
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
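# Recursive DFS can exceed Python's default recursion limit (about 1000 frames)
# on deep graphs. A sketch of a stack-based depth-first traversal over the
# adjacency lists that one could graft onto Graph above -- not part of the
# original class:


def dfs_iterative(self, start_vertex: int) -> None:
    """Visit vertices depth-first with an explicit stack instead of recursion."""
    visited = {start_vertex}
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        print(vertex, end=" ")
        # push unvisited neighbours; reversed() keeps the visit order close
        # to what a recursive walk over the adjacency list would produce
        for neighbour in reversed(self.vertex.get(vertex, [])):
            if neighbour not in visited:
                visited.add(neighbour)
                stack.append(neighbour)


# graft it onto the class if desired:
# Graph.dfs_iterative = dfs_iterative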
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[int] = DebertaTokenizerFast
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCamelCase : Tuple = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase : List[Any] = {'''unk_token''': '''[UNK]'''}
UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : Optional[Any] = tokenizer('''Hello''' , '''World''' )
UpperCamelCase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase : int = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__UpperCAmelCase : Tuple = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = "ernie_m"
__UpperCamelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __SCREAMING_SNAKE_CASE = 250_002 , __SCREAMING_SNAKE_CASE = 768 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 3_072 , __SCREAMING_SNAKE_CASE = "gelu" , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 514 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 1e-05 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.0 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = vocab_size
UpperCamelCase : List[Any] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Tuple = intermediate_size
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : List[str] = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Union[str, Any] = layer_norm_eps
UpperCamelCase : Any = classifier_dropout
UpperCamelCase : List[Any] = is_decoder
UpperCamelCase : Union[str, Any] = act_dropout
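# The attribute_map declared above is standard PretrainedConfig machinery:
# attribute reads and writes on the aliased names are redirected, so
# `config.dropout` resolves to `config.classifier_dropout` and
# `config.num_classes` to `config.num_labels`. A small illustration, assuming
# this file corresponds to `transformers.ErnieMConfig` (the 0.3 value is
# arbitrary):

from transformers import ErnieMConfig

config = ErnieMConfig(classifier_dropout=0.3)
print(config.dropout)             # 0.3, redirected through attribute_map
print(config.num_classes)         # alias for config.num_labels
config.dropout = 0.1              # writes through to classifier_dropout
print(config.classifier_dropout)  # 0.1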
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCamelCase : Optional[int] = input_file.read()
UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE )
return match
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
UpperCamelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
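# The first pattern above flags `open(...)` calls whose line never mentions an
# explicit binary/write mode or an `encoding` keyword. A quick illustration on
# made-up sample lines (the leading space matters because of the `(?<=\s)`
# lookbehind):

import re

no_encoding = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")

print(bool(no_encoding.search(' with open(path) as f:')))                     # True: flagged
print(bool(no_encoding.search(' with open(path, encoding="utf-8") as f:')))  # False
print(bool(no_encoding.search(' with open(path, "rb") as f:')))              # False: binary mode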
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
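# fire exposes convert() on the command line; the equivalent programmatic call,
# with placeholder paths:
#
#     convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")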
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Any = XGLMConfig
__UpperCamelCase : Dict = {}
__UpperCamelCase : List[str] = "gelu"
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ):
"""simple docstring"""
UpperCamelCase : Any = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : str = seq_length
UpperCamelCase : List[str] = is_training
UpperCamelCase : Tuple = use_input_mask
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : int = vocab_size
UpperCamelCase : Optional[int] = d_model
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : Optional[Any] = ffn_dim
UpperCamelCase : Optional[int] = activation_function
UpperCamelCase : List[str] = activation_dropout
UpperCamelCase : Any = attention_dropout
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : Union[str, Any] = initializer_range
UpperCamelCase : int = None
UpperCamelCase : Dict = 0
UpperCamelCase : int = 2
UpperCamelCase : Any = 1
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase : int = None
if self.use_input_mask:
UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = self.get_config()
UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = TFXGLMModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCamelCase : int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] )
UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = '''left'''
# use different length sentences to test batching
UpperCamelCase : Any = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = inputs['''input_ids''']
UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """True if no already-coloured neighbour of this vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to colour vertices from `index` onward."""
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid colouring using at most `max_colors` colours, or [] if impossible."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
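# Usage on the adjacency-matrix representation the functions expect
# (1 marks an edge; the graph and colour budget below are arbitrary):

small_graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(small_graph, 3))  # [0, 1, 0, 1, 0]; returns [] if 3 colours were not enough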
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = vocab_file
UpperCamelCase : List[str] = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase : List[str] = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
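# Assuming this class maps to transformers' MBartTokenizerFast, the language-code
# plumbing above is driven like this (the example sentence is illustrative):

from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# per set_src_lang_special_tokens above, the source ids end with </s> followed
# by the en_XX language code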
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase : int = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : int = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase : str = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(
image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__UpperCAmelCase : Any = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Union[str, Any] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
__UpperCAmelCase : Dict = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"emoji": True,
},
}
]
__UpperCAmelCase : Optional[Any] = 0
for log in Path().glob("*.log"):
__UpperCAmelCase : List[Any] = 0
with open(log, "r") as f:
for line in f:
__UpperCAmelCase : Any = json.loads(line)
if line.get("nodeid", "") != "":
__UpperCAmelCase : str = line["nodeid"]
if line.get("duration", None) is not None:
__UpperCAmelCase : Union[str, Any] = f'''{line['duration']:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__UpperCAmelCase : Any = []
log.unlink()
__UpperCAmelCase : List[str] = ""
__UpperCAmelCase : str = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__UpperCAmelCase : str = []
__UpperCAmelCase : Optional[int] = {}
for test in failed_tests:
__UpperCAmelCase : int = test[0].split("::")
__UpperCAmelCase : Tuple = data[0].split("/")[-1]
if data[0] not in filesafailed:
__UpperCAmelCase : Dict = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__UpperCAmelCase : int = [test[0] for test in failed_table]
__UpperCAmelCase : List[str] = list(set(files))
# Count number of instances in failed_tests
__UpperCAmelCase : str = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__UpperCAmelCase : int = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__UpperCAmelCase : str = "Too many failed tests, please see the full report in the Action results."
__UpperCAmelCase : Tuple = len(err) + 10
__UpperCAmelCase : int = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
__UpperCAmelCase : List[str] = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
__UpperCAmelCase : int = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
__UpperCAmelCase : List[Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
__UpperCAmelCase : Any = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
__UpperCAmelCase : int = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
__UpperCAmelCase : Dict = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
__UpperCAmelCase : Any = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__UpperCAmelCase : Dict = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__UpperCAmelCase : List[Any] = row[0]
else:
__UpperCAmelCase : Dict = ""
__UpperCAmelCase : Dict = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
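# The custom TableFormat at the top of this script renders Slack-friendly pipe
# tables with no rule lines. A quick look at what it produces -- the row is made
# up, and `hf_table_format` names the TableFormat defined above (bound to an
# obfuscated variable in this dump):

from tabulate import tabulate

demo = tabulate(
    [["tests/test_big_modeling.py", 3]],
    headers=["Test Location", "Num Failed"],
    tablefmt=hf_table_format,
    stralign="right",
)
print(demo)  # two pipe-delimited lines, columns right-aligned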
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[int] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
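# Assuming this file corresponds to transformers' WhisperFeatureExtractor
# (80 mel bins, 16 kHz sampling, 30 s chunks), a minimal round trip with random
# audio standing in for a real waveform:

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.random.randn(16_000 * 5).astype(np.float32)  # 5 s of fake 16 kHz audio
features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000): padded to the 30 s window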
| 643
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of a waveform, clamped and rescaled to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw audio into log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
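

# Usage sketch (added for illustration, not part of the original module). It
# assumes the class above is importable as `WhisperFeatureExtractor`; the audio
# is synthetic.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()  # 80 mel bins, 16 kHz, 30 s chunks
    audio = np.zeros(16_000 * 5, dtype=np.float32)  # 5 s of silence
    features = extractor(audio, sampling_rate=16_000, return_tensors="np")
    # Inputs are padded/truncated to 30 s, so the log-mel features come out as
    # (batch, feature_size, nb_max_frames) == (1, 80, 3000).
    print(features["input_features"].shape)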
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer is not picklable; swap it out before serializing.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
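

# Usage sketch (added): loading the fast tokenizer from the hub. This assumes
# the optional `rjieba` package required by `JiebaPreTokenizer` is installed;
# the checkpoint name is one of those listed above.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    # The custom Jieba pre-tokenizer splits Chinese text into words before
    # WordPiece, rather than the per-character split used by BERT-style tokenizers.
    print(tokenizer.tokenize("今天天气非常好。"))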
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the number of pigeonholes needed.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Scatter: drop each element into its hole and count repeats.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Gather: write the values back into the array in order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Return the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
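
# Note (added): pigeonhole sort runs in O(n + k) time and O(k) extra space,
# where k = max(array) - min(array) + 1, so it only pays off when the spread
# of values is comparable to the number of elements. Quick sanity check:
#
#     assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]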
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[int] = True
UpperCamelCase : Dict = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[Any] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : str = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : List[Any] = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = True
UpperCamelCase : int = True
UpperCamelCase : Dict = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
UpperCamelCase : Optional[int] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase : int = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
UpperCamelCase : int = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
UpperCamelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[Any] = 3
UpperCamelCase : int = input_dict['''input_ids''']
UpperCamelCase : str = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase : Optional[Any] = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[Any] = 3
UpperCamelCase : Union[str, Any] = '''single_label_classification'''
UpperCamelCase : Any = input_dict['''input_ids''']
UpperCamelCase : Tuple = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase : Optional[Any] = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : str = 3
UpperCamelCase : Optional[int] = '''multi_label_classification'''
UpperCamelCase : str = input_dict['''input_ids''']
UpperCamelCase : str = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase : Dict = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase : Optional[int] = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
UpperCamelCase : int = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
UpperCamelCase : List[str] = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase : Tuple = {'''type''': scaling_type, '''factor''': 10.0}
UpperCamelCase : List[str] = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
UpperCamelCase : str = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
UpperCamelCase : List[Any] = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-5 ) )
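
# Note (added): this module is meant to be driven by pytest; the path below
# follows the usual transformers test layout and is illustrative only:
#
#     python -m pytest tests/models/open_llama/test_modeling_open_llama.py -k "OpenLlamaModelTest"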
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
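

# Usage sketch (added): once registered on the transformers CLI, the command
# above is invoked as shown below; the flags mirror `register_subcommand`, and
# the fixture path is illustrative:
#
#     transformers-cli add-new-model
#     transformers-cli add-new-model --testing --testing_file tests/fixtures/new_model_config.json
#
# The second form feeds cookiecutter a canned configuration instead of
# prompting interactively.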
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """
    Transform the image by the affine map that sends the three points in
    ``pts1`` to the three points in ``pts2``.
    """
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into grayscale
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different pairs of points with which to rotate the image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images to a list (the point pairings below are illustrative,
    # since the original pairings are not recoverable from the source)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot the different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
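
# Note (added): cv2.getAffineTransform solves for the unique 2x3 matrix M that
# maps three source points onto three destination points, and warpAffine then
# applies [x', y'] = M @ [x, y, 1] to every pixel. Minimal standalone check:
#
#     src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
#     dst = np.array([[0, 0], [0, 1], [-1, 0]], np.float32)  # 90-degree rotation
#     print(cv2.getAffineTransform(src, dst))  # [[0., -1., 0.], [1., 0., 0.]]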
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
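

# Usage sketch (added): instantiating the configuration standalone; the printed
# values are the defaults defined above.
if __name__ == "__main__":
    config = MarkupLMConfig()
    print(config.hidden_size, config.max_depth, config.xpath_unit_hidden_size)  # 768 50 32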
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
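

# Usage sketch (added): the ONNX config above only declares the dynamic axes of
# the exported graph; wiring it into an actual export is handled elsewhere in
# transformers.onnx.
if __name__ == "__main__":
    onnx_config = ConditionalDetrOnnxConfig(ConditionalDetrConfig())
    print(dict(onnx_config.inputs))  # pixel_values / pixel_mask dynamic axes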
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
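
# Note (added): the `_LazyModule` indirection keeps importing this package
# cheap; the heavy torch/flax modeling files are only imported the first time
# one of the names declared in `_import_structure` is accessed, e.g.:
#
#     from transformers.models.gpt_neo import GPTNeoConfig  # no torch import yet
#     from transformers.models.gpt_neo import GPTNeoModel   # pulls in the modeling file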
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Fetch the current quote for ``symbol`` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
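
# Note (added): the scraper depends on Yahoo Finance's obfuscated CSS class
# names, which change without notice, so treat it as a BeautifulSoup demo
# rather than a stable API. A slightly more defensive lookup:
#
#     div = soup.find("div", class_=class_)
#     price = div.find("span").text if div else "N/A"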
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
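# Why the test pins padding_side="left": decoder-only models continue from the
# final position of the batch, so prompts must end flush at the right edge.
#   right-padded: [p1 p2 PAD PAD] -> the model continues after PAD (wrong)
#   left-padded:  [PAD PAD p1 p2] -> the model continues after p2 (correct)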
| 643
|
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 643
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
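
# Quick usage sketch (assumes a transformers install matching the classes
# above): inspect a default config and its ONNX export spec.
if __name__ == "__main__":
    config = YolosConfig()
    onnx_config = YolosOnnxConfig(config)
    print(config.num_detection_tokens)  # 100 by default
    print(dict(onnx_config.inputs))     # {'pixel_values': {0: 'batch', ...}}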
| 643
| 1
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 643
|
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
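
# Worked example: binary_or(25, 32) == "0b111001".
#   25 -> "11001", 32 -> "100000"; zero-padded to width 6 and OR'd per bit.
assert binary_or(25, 32) == "0b111001"
assert binary_or(0, 0) == "0b0"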
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 1
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
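
# Quick self-check: pigeonhole sort runs in O(n + range) time and O(range)
# extra space, so it only pays off when max - min is small relative to n.
assert pigeon_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]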
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
__UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 643
|
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
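
    # Sanity check on a toy array (illustrative addition, not in the original
    # script): "valid"-style filtering shrinks each spatial dim by k_size - 1.
    toy = zeros((5, 5)) + 128
    assert gaussian_filter(toy, 3, sigma=1).shape == (3, 3)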
| 643
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
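
# For intuition, a minimal reimplementation sketch of the contiguous-split
# behaviour the first set of tests expects (hypothetical helper, not the
# datasets API; verified against the parametrized cases above):
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    splits, start = [], 0
    for job in range(num_jobs):
        # Spread the remainder over the first num_shards % num_jobs jobs.
        size = num_shards // num_jobs + (1 if job < num_shards % num_jobs else 0)
        splits.append(range(start, start + size))
        start += size
    return splits


assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]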
| 643
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that is a no-op on non-main processes."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # With `== 0` the bar would be hidden precisely on the main process,
        # the opposite of the intent; disable it everywhere *except* rank 0.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
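
if __name__ == "__main__":
    # Usage sketch (assumes an `accelerate` environment): the bar renders only
    # on the local main process; pass main_process_only=False to show it everywhere.
    for _ in tqdm(True, range(10), desc="steps"):
        pass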
| 643
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 643
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Replace characters via the SentencePiece character map, leaving others unchanged."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with SentencePiece, re-splitting CJK chars, punctuation, and digit runs."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
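    # Layout note for the two methods above: a pair (A, B) is encoded as
    #   [CLS] A [SEP] [SEP] B [SEP]
    # so token_type_ids are len(A) + 1 zeros ([CLS] + A) followed by
    # len(B) + 3 ones (both middle [SEP]s, B, and the final [SEP]).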
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
| 643
| 1
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
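
# Shape arithmetic behind create_and_check_model with the defaults above:
# (32 // 2) ** 2 = 256 patches; two patch-merging stages each divide the token
# count by 4 (256 // 4 ** 2 = 16) while the embed dim doubles twice
# (16 * 2 ** 2 = 64), hence last_hidden_state of shape (batch_size, 16, 64).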
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overridden because returning the feature maps requires a custom check
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 643
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block that bundles conv/norm/activation layers."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pool at several scales, project, then upsample back."""

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
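
# Minimal smoke test of the pyramid pooling module above (illustrative channel
# counts, not UperNet defaults): every scale is pooled, projected to `channels`,
# then upsampled back to the input resolution.
if __name__ == "__main__":
    ppm = UperNetPyramidPoolingModule((1, 2, 3, 6), in_channels=64, channels=32, align_corners=False)
    feats = ppm(torch.randn(1, 64, 16, 16))
    assert [f.shape for f in feats] == [torch.Size([1, 32, 16, 16])] * 4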
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
    def __init__( self , config , in_channels ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self , encoder_hidden_states ):
        """simple docstring"""
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead(nn.Module):
'''simple docstring'''
    def __init__( self , config , in_index = 2 , kernel_size = 3 , dilation = 1 ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward( self , encoder_hidden_states ):
        """simple docstring"""
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights( self ):
        """simple docstring"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self , module , value=False ):
        """simple docstring"""
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", UPERNET_START_DOCSTRING, )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
'''simple docstring'''
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values = None , output_attentions = None , output_hidden_states = None , labels = None , return_dict = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
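# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module). A
# minimal, hedged example of running the semantic-segmentation model defined
# above; the checkpoint id "openmmlab/upernet-convnext-tiny" and the test image
# URL are assumptions, not taken from this file.
#
#     from PIL import Image
#     import requests
#     import torch
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits          # (batch, num_labels, height, width)
#     segmentation_map = logits.argmax(dim=1)      # per-pixel class ids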
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before TF gets imported
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset: datasets.Dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    """simple docstring"""
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
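# Note (added for illustration): each `map(...)` / `filter(...)` call above returns the
# wall-clock duration measured by the `@get_duration` decorator imported from utils, so
# `times` maps benchmark names to seconds before being serialized to RESULTS_FILE_PATH.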
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    'scores': List of scores.\nExamples:\n\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> bleurt = datasets.load_metric(\"bleurt\")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare( self , dl_manager ):
"""simple docstring"""
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ):
        """simple docstring"""
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
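# Usage note (added for illustration): the checkpoint is selected via the metric's config
# name, so under the CHECKPOINT_URLS mapping above one would load, e.g.:
#
#     bleurt = datasets.load_metric("bleurt", "bleurt-large-512")
#     results = bleurt.compute(predictions=["hello there"], references=["hello there"])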
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
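# Usage sketch (added for illustration; not part of the original __init__). How the
# re-exported ShapEPipeline is typically driven; the checkpoint id "openai/shap-e" and
# the prompt are assumptions.
#
#     import torch
#     from diffusers import ShapEPipeline
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images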
from math import pow, sqrt
def validate(*values: float ):
    """simple docstring"""
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio(molar_mass_1: float , molar_mass_2: float ):
    """simple docstring"""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
    )
def first_effusion_rate(effusion_rate: float , molar_mass_1: float , molar_mass_2: float ):
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def second_effusion_rate(effusion_rate: float , molar_mass_1: float , molar_mass_2: float ):
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def first_molar_mass(molar_mass: float , effusion_rate_1: float , effusion_rate_2: float ):
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def second_molar_mass(molar_mass: float , effusion_rate_1: float , effusion_rate_2: float ):
    """simple docstring"""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
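# Worked example (added for illustration): for hydrogen (2.016 g/mol) and oxygen
# (32.00 g/mol), Graham's law gives rate_H2 / rate_O2 = sqrt(32.00 / 2.016) ~= 3.984,
# which effusion_ratio above reproduces.
if __name__ == "__main__":
    print(validate(2.016, 32.00))        # True: both molar masses are positive
    print(effusion_ratio(2.016, 32.00))  # ~3.984, rounded to 6 decimal places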
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    '''simple docstring'''
    def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
        """simple docstring"""
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        """simple docstring"""
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        """simple docstring"""
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q , S , T=1 ):
        """simple docstring"""
        return self.softmax(T * self.cos(q , S ) )
    def forward( self , W_query , W_supports ):
        """simple docstring"""
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
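# Usage sketch (added for illustration; not part of the original file). The forward pass
# above scores every support-token position as a candidate entity start/end for each
# query; W_query / W_supports are assumed to be tokenized batches whose supports carry
# the "sizes", "start_token_id" and "end_token_id" entries consumed above:
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)  # per-token start / end probabilities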
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging if TF gets imported indirectly
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequences , decoded_sequences ):
                self.assertEqual(expected , decoded )
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
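# Migration sketch (added for illustration): the deprecation warning above points at the
# drop-in replacement; the checkpoint id "sail/poolformer_s12" is an assumption.
#
#     from transformers import PoolFormerImageProcessor
#     processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")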
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
'''simple docstring'''
    def _no_encoding_on_file_open( self , filepath ):
        """simple docstring"""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self , filepath ):
        """simple docstring"""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        """simple docstring"""
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements( self ):
        """simple docstring"""
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name: str ):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name: str ):
    """simple docstring"""
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    """simple docstring"""
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
UpperCamelCase : Optional[int] = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1_0_0_0 , classifier_activation='''softmax''' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""" )
        model_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
__UpperCAmelCase : List[Any] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
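# Example invocation (added for illustration; the script filename is an assumption, the
# flags mirror the argparse definition above):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model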
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config( self ):
"""simple docstring"""
return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def test_resize_token_embeddings( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ):
        """simple docstring"""
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation( self ):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        tokenizer.padding_side = '''left'''
        # use different length sentences to test batching
        sentences = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        inputs = tokenizer(sentences , return_tensors='''tf''' , padding=True )
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
from PIL import Image
def change_contrast(img: Image , level: int ):
    """simple docstring"""
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
    def contrast(c: int ) -> int:
        return int(1_2_8 + factor * (c - 1_2_8) )
    return img.point(contrast )
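# Note (added for illustration): with level=170, as in the demo below, the factor is
# 259 * (170 + 255) / (255 * (259 - 170)) ~= 4.85, a strong contrast boost; level=0
# gives factor 1.0, i.e. the identity mapping.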
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = vocab_file
UpperCamelCase : List[str] = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase : List[str] = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
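
# A minimal usage sketch for the fast tokenizer defined above; assumes the
# facebook/mbart-large-en-ro checkpoint is reachable. For mBART the source text
# is encoded as "tokens </s> src_lang_code".
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    demo_tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    demo_ids = demo_tok("UN Chief Says There Is No Plan to Stop War").input_ids
    print(demo_tok.convert_ids_to_tokens(demo_ids))  # [..., '</s>', 'en_XX']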
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
UpperCamelCase : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
UpperCamelCase : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
UpperCamelCase : str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
UpperCamelCase : Any = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
UpperCamelCase : Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
UpperCamelCase : str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCamelCase : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCamelCase : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCamelCase : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCamelCase : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
UpperCamelCase : str = tax_attention_key
UpperCamelCase : int = tax_attention_out
UpperCamelCase : Dict = tax_attention_query
UpperCamelCase : Dict = tax_attention_value
UpperCamelCase : int = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : List[str] = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase : Tuple = tax_mlp_wi_a
UpperCamelCase : Optional[Any] = tax_mlp_wi_a
else:
UpperCamelCase : Optional[Any] = tax_mlp_wi
UpperCamelCase : Tuple = tax_mlp_wo
UpperCamelCase : Dict = tax_mlp_layer_norm
UpperCamelCase : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase : Any = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCamelCase : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
UpperCamelCase : Optional[Any] = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase : Any = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
UpperCamelCase : List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
UpperCamelCase : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
UpperCamelCase : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
UpperCamelCase : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
UpperCamelCase : str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
UpperCamelCase : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
UpperCamelCase : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
UpperCamelCase : Tuple = tax_enc_dec_attention_module['''key''']['''kernel''']
UpperCamelCase : Dict = tax_enc_dec_attention_module['''out''']['''kernel''']
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
UpperCamelCase : Optional[Any] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
UpperCamelCase : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
UpperCamelCase : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCamelCase : int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCamelCase : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCamelCase : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCamelCase : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
UpperCamelCase : str = tax_attention_key
UpperCamelCase : str = tax_attention_out
UpperCamelCase : Union[str, Any] = tax_attention_query
UpperCamelCase : List[Any] = tax_attention_value
UpperCamelCase : int = tax_pre_attention_layer_norm
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_key
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_out
UpperCamelCase : Optional[int] = tax_enc_dec_attention_query
UpperCamelCase : Union[str, Any] = tax_enc_dec_attention_value
UpperCamelCase : List[str] = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase : str = tax_mlp_wi_a
UpperCamelCase : Optional[Any] = tax_mlp_wi_a
else:
UpperCamelCase : List[str] = tax_mlp_wi
UpperCamelCase : Tuple = tax_mlp_wo
        UpperCamelCase : int = tax_mlp_layer_norm
UpperCamelCase : List[Any] = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase : Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    UpperCamelCase : Optional[Any] = tax_decoder_norm
# Only for layer 0:
UpperCamelCase : int = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCamelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase : int = tax_model['''target''']['''token_embedder''']['''embedding''']
    UpperCamelCase : int = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase : List[str] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
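
# Example invocation (the paths below are placeholders; the config name is one
# illustrative LongT5 config, not a requirement):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/flax_dump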
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
    def test_inference_image_variations(self):
        """simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """simple docstring"""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))  # True
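
# A triangle contains an odd cycle and cannot be two-colored, so the same
# check returns False:
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # False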
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[int] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
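
# A minimal end-to-end sketch of the extractor above on a synthetic tone, with no
# audio file needed. The import assumes the library class that this file implements;
# the no-argument constructor uses the defaults defined in __init__ (80 mel bins,
# 16 kHz sampling, 30-second chunks).
if __name__ == "__main__":
    from transformers import WhisperFeatureExtractor

    demo_extractor = WhisperFeatureExtractor()
    demo_wave = np.sin(2 * np.pi * 440.0 * np.arange(16_000) / 16_000).astype(np.float32)  # 1 s, 440 Hz
    demo_feats = demo_extractor(demo_wave, sampling_rate=16_000, return_tensors="np")
    print(demo_feats["input_features"].shape)  # (1, 80, 3000): one 30 s chunk of frames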
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
    def test_input_types(self):
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """simple docstring"""
        # One constraint must not be a complete subset of another: [1, 2] is a prefix
        # of [1, 2, 3, 4], so constructing the constraint has to fail.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
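
# A standalone walk-through of the update() protocol the tests above exercise:
# each call advances the trie of allowed continuations and reports
# (stepped, completed, reset).
if __name__ == "__main__":
    from transformers.generation import DisjunctiveConstraint

    demo_constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = demo_constraint.update(token)
        print(token, stepped, completed, reset, demo_constraint.current_seq)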
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Dict = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__UpperCAmelCase : Tuple = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__UpperCAmelCase : Any = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents
):
UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Optional[int] = do_lower_case
UpperCamelCase : Optional[Any] = strip_accents
UpperCamelCase : List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.__dict__.copy()
UpperCamelCase : Any = BertPreTokenizer()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Any = BertPreTokenizer()
return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
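
# A minimal usage sketch; assumes the junnyu/roformer_chinese_base checkpoint and
# the rjieba dependency are installed, since the custom pre-tokenizer above relies
# on jieba-style word segmentation.
if __name__ == "__main__":
    from transformers import RoFormerTokenizerFast

    demo_tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(demo_tok.tokenize("今天天气非常好。"))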
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """simple docstring"""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the number of holes needed.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Drop each element into its hole, counting repeats.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Put the elements back into the array in sorted order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Return the sorted array.
    return array
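
# A quick deterministic check (the block below reads input interactively):
assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]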
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
__UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Tuple = "xlm"
__UpperCamelCase : List[Any] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self , __SCREAMING_SNAKE_CASE=30_145 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2_048**-0.5 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="first" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : str = vocab_size
UpperCamelCase : int = emb_dim
UpperCamelCase : List[str] = n_layers
UpperCamelCase : Any = n_heads
UpperCamelCase : List[str] = dropout
UpperCamelCase : Optional[Any] = attention_dropout
UpperCamelCase : List[Any] = gelu_activation
UpperCamelCase : Optional[Any] = sinusoidal_embeddings
UpperCamelCase : Union[str, Any] = causal
UpperCamelCase : Tuple = asm
UpperCamelCase : Union[str, Any] = n_langs
UpperCamelCase : int = use_lang_emb
UpperCamelCase : int = layer_norm_eps
UpperCamelCase : str = bos_index
UpperCamelCase : List[str] = eos_index
UpperCamelCase : int = pad_index
UpperCamelCase : Optional[int] = unk_index
UpperCamelCase : Dict = mask_index
UpperCamelCase : int = is_encoder
UpperCamelCase : Any = max_position_embeddings
UpperCamelCase : Tuple = embed_init_std
UpperCamelCase : Optional[Any] = init_std
UpperCamelCase : Union[str, Any] = summary_type
UpperCamelCase : int = summary_use_proj
UpperCamelCase : Optional[int] = summary_activation
UpperCamelCase : Dict = summary_proj_to_labels
UpperCamelCase : Union[str, Any] = summary_first_dropout
UpperCamelCase : int = start_n_top
UpperCamelCase : Optional[int] = end_n_top
UpperCamelCase : Optional[Any] = mask_token_id
UpperCamelCase : Optional[Any] = lang_id
if "n_words" in kwargs:
UpperCamelCase : str = kwargs['''n_words''']
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
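
# A small illustration of the attribute_map above: canonical configuration names
# are transparently routed to the XLM-specific attribute names.
if __name__ == "__main__":
    from transformers import XLMConfig

    demo_config = XLMConfig(emb_dim=1_024, n_heads=8, n_layers=6)
    print(demo_config.hidden_size, demo_config.num_attention_heads, demo_config.num_hidden_layers)
    # -> 1024 8 6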
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase : Dict = (
Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = configuration['''lowercase_modelname''']
UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
UpperCamelCase : Any = f.readlines()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Create temp file
UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp()
UpperCamelCase : Tuple = False
with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file:
with open(__SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(__SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
UpperCamelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(__SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Remove original file
remove(__SCREAMING_SNAKE_CASE )
# Move new file
move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE ) as datafile:
UpperCamelCase : int = []
UpperCamelCase : Dict = False
UpperCamelCase : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(__SCREAMING_SNAKE_CASE )
remove(__SCREAMING_SNAKE_CASE )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(__SCREAMING_SNAKE_CASE )
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, data: Any) -> None:
        """simple docstring"""
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    """simple docstring"""

    def __init__(self) -> None:
        """simple docstring"""
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        """simple docstring"""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """simple docstring"""
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """simple docstring"""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """simple docstring"""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """simple docstring"""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """simple docstring"""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """simple docstring"""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """simple docstring"""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        """simple docstring"""
        return len(self) == 0
def test_circular_linked_list() -> None:
    """simple docstring"""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """simple docstring"""
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image into gray scale values
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts1, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
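
# A synthetic check of get_rotation that needs no image file: warp a white
# square with an illustrative affine mapping of three corner points.
if __name__ == "__main__":
    synthetic = np.zeros((200, 200), dtype=np.uint8)
    synthetic[50:150, 50:150] = 255
    src_pts = np.array([[50, 50], [150, 50], [50, 150]], np.float32)
    dst_pts = np.array([[60, 40], [160, 60], [40, 160]], np.float32)
    warped = get_rotation(synthetic, src_pts, dst_pts, 200, 200)
    print(warped.shape)  # (200, 200)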
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = data
def __iter__( self ):
"""simple docstring"""
for element in self.data:
yield element
def a ( SCREAMING_SNAKE_CASE_ : int=True ):
"""simple docstring"""
UpperCamelCase : str = Accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False ):
"""simple docstring"""
if iterable:
UpperCamelCase : Any = DummyIterableDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE_ ) ) )
else:
UpperCamelCase : Any = TensorDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE_ ) ) )
UpperCamelCase : Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
return dl
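# Note (added for clarity): accelerator.prepare wraps the loader so that each process
# receives its own shard of batches; with the default even_batches=True the final
# batches are padded so every process sees the same number, which the tests below
# rely on.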
def a ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , ):
"""simple docstring"""
UpperCamelCase : List[Any] = create_dataloader(accelerator=SCREAMING_SNAKE_CASE_ , dataset_size=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a ( ):
"""simple docstring"""
UpperCamelCase : str = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a ( ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch.nn.Linear(1 , 1 )
UpperCamelCase : str = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
UpperCamelCase : int = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = ddp_model(batch[0].float() )
UpperCamelCase : Dict = output.sum()
loss.backward()
batch_idxs.append(SCREAMING_SNAKE_CASE_ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
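# Context (added for clarity): join_uneven_inputs wraps torch's Join context manager,
# so a process that exhausts its batches first shadows the collective ops of the
# remaining processes instead of deadlocking the DDP training loop.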
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE_ )
assert "only supported for multi-GPU" in str(w[-1].message )
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = True
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Optional[Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = torch.nn.Linear(1 , 1 )
UpperCamelCase : Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
UpperCamelCase : str = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = train_dl.batch_sampler.even_batches
UpperCamelCase : List[str] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a ( ):
"""simple docstring"""
UpperCamelCase : List[str] = True
UpperCamelCase : str = False
UpperCamelCase : Optional[Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.nn.Linear(1 , 1 )
UpperCamelCase : Any = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = create_accelerator()
UpperCamelCase : List[str] = torch.nn.Linear(1 , 1 )
UpperCamelCase : Dict = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE_ )
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE_ )
assert "only supported for map-style datasets" in str(w[-1].message )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
UpperCamelCase : Tuple = accelerator.state.distributed_type
UpperCamelCase : Optional[Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = original_state
if __name__ == "__main__":
main()
| 643
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = backbone_config.get('''model_type''' )
UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
UpperCamelCase : Tuple = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Union[str, Any] = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self ):
"""simple docstring"""
return self.d_model
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase : List[Any] = self.backbone_config.to_dict()
UpperCamelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 1
|
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : str = len(SCREAMING_SNAKE_CASE_ ) + 1
UpperCamelCase : Tuple = len(SCREAMING_SNAKE_CASE_ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether the length-i prefix of
# input_string matches the length-j prefix of the given pattern.
# "dp" stands for dynamic programming.
UpperCamelCase : str = [[0 for i in range(SCREAMING_SNAKE_CASE_ )] for j in range(SCREAMING_SNAKE_CASE_ )]
# an empty string matches an empty pattern
UpperCamelCase : Optional[Any] = 1
# an empty pattern never matches a non-empty string
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = 0
# an empty string matches the pattern only when every literal in the
# pattern can be skipped by a following '*' (hence dp[0][j] = dp[0][j - 2])
for j in range(1 , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
for j in range(1 , SCREAMING_SNAKE_CASE_ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCamelCase : int = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCamelCase : Union[str, Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCamelCase : Tuple = dp[i - 1][j]
else:
UpperCamelCase : Any = 0
else:
UpperCamelCase : Optional[Any] = 0
return bool(dp[-1][-1] )
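# Worked example (hand-checked): for input_string = "aab" and pattern = "c*a*b",
# "c*" matches the empty string, "a*" matches "aa", and "b" matches "b", so the
# DP table ends with dp[-1][-1] == 1 and the function returns True.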
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__UpperCAmelCase : Tuple = "aab"
__UpperCAmelCase : Any = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 643
|
import requests
from bs4 import BeautifulSoup
def a ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ):
"""simple docstring"""
UpperCamelCase : Dict = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
UpperCamelCase : Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' )
UpperCamelCase : Dict = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
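# Caveat (added note): the CSS class above is tied to Yahoo Finance's markup at the
# time this scraper was written; selectors like this routinely break when the page
# layout changes, so treat the lookup as illustrative rather than stable.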
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643
| 1
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=[1, 2, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 4] , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=8 , ):
"""simple docstring"""
UpperCamelCase : Dict = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : str = image_size
UpperCamelCase : Tuple = patch_size
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = embed_dim
UpperCamelCase : Any = depths
UpperCamelCase : Optional[Any] = num_heads
UpperCamelCase : Tuple = window_size
UpperCamelCase : List[Any] = mlp_ratio
UpperCamelCase : Union[str, Any] = qkv_bias
UpperCamelCase : Any = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : List[Any] = drop_path_rate
UpperCamelCase : str = hidden_act
UpperCamelCase : Optional[int] = use_absolute_embeddings
UpperCamelCase : List[Any] = patch_norm
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : Any = initializer_range
UpperCamelCase : Optional[int] = is_training
UpperCamelCase : Dict = scope
UpperCamelCase : Optional[int] = use_labels
UpperCamelCase : Optional[Any] = type_sequence_label_size
UpperCamelCase : List[Any] = encoder_stride
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : List[str] = None
if self.use_labels:
UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : List[str] = 1
UpperCamelCase : List[str] = SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = self.type_sequence_label_size
UpperCamelCase : Union[str, Any] = SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = config_and_inputs
UpperCamelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCamelCase : Dict = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : int = False
__UpperCamelCase : Union[str, Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = SwinvaModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = True
for model_class in self.all_model_classes:
UpperCamelCase : Any = True
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : List[str] = True
UpperCamelCase : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : str = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
UpperCamelCase : List[Any] = outputs.attentions
UpperCamelCase : List[str] = len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase : Tuple = True
UpperCamelCase : int = config.window_size**2
UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCamelCase : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
UpperCamelCase : str = True
UpperCamelCase : Any = True
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : str = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
UpperCamelCase : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCamelCase : Dict = 2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : int = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : List[str] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Tuple = outputs.hidden_states
UpperCamelCase : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
UpperCamelCase : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = reshaped_hidden_states[0].shape
UpperCamelCase : int = (
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCamelCase : str = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : str = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = 3
UpperCamelCase : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : int = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[int] = SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : int = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCamelCase : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 643
|
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCamelCase : List[str] = len(bin(SCREAMING_SNAKE_CASE_ )[3:] )
UpperCamelCase : List[str] = bin(abs(SCREAMING_SNAKE_CASE_ ) - (1 << binary_number_length) )[3:]
UpperCamelCase : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE_ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 1
|
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) - pat_len + 1 ):
UpperCamelCase : Union[str, Any] = True
for j in range(SCREAMING_SNAKE_CASE_ ):
if s[i + j] != pattern[j]:
UpperCamelCase : str = False
break
if match_found:
position.append(SCREAMING_SNAKE_CASE_ )
return position
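# Complexity note (added): the naive search re-compares the pattern at every start
# position, so the worst case is O(len(s) * len(pattern)), e.g. s = "aaa...a" with
# pattern = "aa...ab".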
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 643
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 1
|
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCamelCase : List[str] = len(bin(SCREAMING_SNAKE_CASE_ )[3:] )
UpperCamelCase : List[str] = bin(abs(SCREAMING_SNAKE_CASE_ ) - (1 << binary_number_length) )[3:]
UpperCamelCase : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE_ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
|
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be non-negative''' )
UpperCamelCase : int = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
UpperCamelCase : List[str] = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]
UpperCamelCase : Tuple = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
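# Worked example (hand-checked): for a = 25 and b = 32, the binary strings are
# "11001" and "100000"; zero-filling both to width 6 and OR-ing column by column
# yields "0b111001", which is 57 == 25 | 32.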
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 1
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
__UpperCAmelCase : Dict = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
__UpperCAmelCase : Dict = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
__UpperCAmelCase : str = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
__UpperCAmelCase : Optional[int] = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = ["input_ids"]
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = RESOURCE_FILES_NAMES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = do_lower_case
UpperCamelCase : Dict = sentencepiece_model_ckpt
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase : str = {v: k for k, v in self.vocab.items()}
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if text is None:
return None
UpperCamelCase : str = self.tokenize(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : str = '''''', []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
UpperCamelCase : Optional[int] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = unicodedata.normalize('''NFKC''' , __SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = normalized_text, [], 0
if self.do_lower_case:
UpperCamelCase : Tuple = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCamelCase : Any = token[1:]
UpperCamelCase : Optional[int] = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
UpperCamelCase : List[Any] = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCamelCase : str = end
return token_mapping
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.__dict__.copy()
UpperCamelCase : str = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCamelCase : List[str] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
UpperCamelCase : Any = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Any = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = {}
with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = line.rstrip('''\n''' )
UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
| 643
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase : List[str] = k_size // 2
UpperCamelCase , UpperCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
UpperCamelCase : Dict = 1 / (2 * pi * sigma) * exp(-(square(SCREAMING_SNAKE_CASE_ ) + square(SCREAMING_SNAKE_CASE_ )) / (2 * square(SCREAMING_SNAKE_CASE_ )) )
return g
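# Background note (added): the kernel above samples g(x, y) proportional to
# exp(-(x^2 + y^2) / (2 * sigma^2)) on a k_size x k_size grid of offsets centred on
# the kernel midpoint; the im2col step below then turns the convolution into a
# single matrix multiplication.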
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Tuple = image.shape[0], image.shape[1]
# dst image height and width
UpperCamelCase : str = height - k_size + 1
UpperCamelCase : Optional[int] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
UpperCamelCase : List[Any] = zeros((dst_height * dst_width, k_size * k_size) )
UpperCamelCase : Tuple = 0
for i, j in product(range(SCREAMING_SNAKE_CASE_ ) , range(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
UpperCamelCase : Dict = window
row += 1
# turn the kernel into shape(k*k, 1)
UpperCamelCase : Optional[int] = gen_gaussian_kernel(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = ravel(SCREAMING_SNAKE_CASE_ )
# reshape and get the dst image
UpperCamelCase : Optional[int] = dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(uint8 )
return dst
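# Note (added): no padding is applied, so the filtered image shrinks by k_size - 1
# pixels in each dimension (dst_height = height - k_size + 1, likewise for width).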
if __name__ == "__main__":
# read original image
__UpperCAmelCase : Union[str, Any] = imread(r"../image_data/lena.jpg")
# convert the image to grayscale
__UpperCAmelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__UpperCAmelCase : Optional[int] = gaussian_filter(gray, 3, sigma=1)
__UpperCAmelCase : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
| 643
| 1
|
import os
import sys
import unittest
__UpperCAmelCase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__UpperCAmelCase : str = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__UpperCAmelCase : Optional[int] = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = get_test_to_tester_mapping(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = get_test_to_tester_mapping(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = {'''BertModelTest''': '''BertModelTester'''}
UpperCamelCase : Tuple = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = get_model_to_test_mapping(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = get_model_to_test_mapping(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
UpperCamelCase : Dict = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = get_model_to_tester_mapping(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = get_model_to_tester_mapping(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
UpperCamelCase : Optional[int] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
| 643
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a ( SCREAMING_SNAKE_CASE_ : bool = True , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
UpperCamelCase : int = False
if main_process_only:
UpperCamelCase : int = PartialState().local_process_index != 0
return _tqdm(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , disable=SCREAMING_SNAKE_CASE_ )
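# Usage sketch (an added note; this wrapper mirrors accelerate's `tqdm` helper, and
# main_process_only is its first positional argument): a call such as
# `for batch in tqdm(True, dataloader): ...` shows the progress bar only on the
# local main process and disables it on every other process.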
| 643
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
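# Stops generation once the total sequence length reaches `max_length`; optionally warns
# when the length exceeds the model's `max_position_embeddings`.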
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = max_length
UpperCamelCase : Dict = max_position_embeddings
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = input_ids.shape[-1]
UpperCamelCase : List[str] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'''with `max_length = start_length + max_new_tokens` instead.''' , __SCREAMING_SNAKE_CASE , )
UpperCamelCase : Any = start_length
UpperCamelCase : Tuple = max_new_tokens
UpperCamelCase : Optional[Any] = start_length + max_new_tokens
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
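# Stops generation once wall-clock time since `initial_timestamp` exceeds `max_time` seconds.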
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = max_time
UpperCamelCase : List[str] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
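# A list of stopping criteria: generation stops as soon as any single criterion returns True.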
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return any(criteria(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for criteria in self )
@property
def _lowercase ( self ):
"""simple docstring"""
for stopping_criterium in self:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return stopping_criterium.max_length
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return stopping_criterium.max_length
return None
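# Clones the criteria list and ensures it enforces the requested `max_length`,
# warning if the list already carries a conflicting value.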
def a ( SCREAMING_SNAKE_CASE_ : StoppingCriteriaList , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : Any = stopping_criteria.max_length
UpperCamelCase : str = deepcopy(SCREAMING_SNAKE_CASE_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , SCREAMING_SNAKE_CASE_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE_ ) )
return new_stopping_criteria
| 643
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
__UpperCAmelCase : Dict = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
__UpperCAmelCase : Dict = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
__UpperCAmelCase : str = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
__UpperCAmelCase : Optional[int] = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = ["input_ids"]
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = RESOURCE_FILES_NAMES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = do_lower_case
UpperCamelCase : Dict = sentencepiece_model_ckpt
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase : str = {v: k for k, v in self.vocab.items()}
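    # Maps each sentencepiece token back to its (start, end) character span in the normalized text.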
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if text is None:
return None
UpperCamelCase : str = self.tokenize(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : str = '''''', []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
UpperCamelCase : Optional[int] = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = unicodedata.normalize('''NFKC''' , __SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = normalized_text, [], 0
if self.do_lower_case:
UpperCamelCase : Tuple = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCamelCase : Any = token[1:]
UpperCamelCase : Optional[int] = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
UpperCamelCase : List[Any] = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCamelCase : str = end
return token_mapping
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.__dict__.copy()
UpperCamelCase : str = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        return "".join(self.SP_CHAR_MAPPING.get(c , c ) for c in text )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCamelCase : List[str] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
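            # Split the piece further at CJK/punctuation characters and at digit <-> non-digit boundaries.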
UpperCamelCase : Any = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Any = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
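    # Single sequence: [CLS] X [SEP]; sequence pair: [CLS] A [SEP] [SEP] B [SEP] (two [SEP]s between segments).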
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = {}
with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = line.rstrip('''\n''' )
UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
| 643
| 1
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
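# Tester that builds small BeiT configs and random inputs for the common model tests below.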
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
"""simple docstring"""
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = 100
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Tuple = patch_size
UpperCamelCase : Union[str, Any] = num_channels
UpperCamelCase : Dict = is_training
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Tuple = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : List[Any] = attention_probs_dropout_prob
UpperCamelCase : Optional[int] = type_sequence_label_size
UpperCamelCase : int = initializer_range
UpperCamelCase : str = scope
UpperCamelCase : List[Any] = out_indices
UpperCamelCase : Tuple = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase : List[Any] = (image_size // patch_size) ** 2
UpperCamelCase : List[Any] = num_patches + 1
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Dict = None
UpperCamelCase : Optional[Any] = None
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.type_sequence_label_size
UpperCamelCase : Optional[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : Any = 1
UpperCamelCase : Optional[int] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = self.num_labels
UpperCamelCase : Dict = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : Any = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[str] = False
__UpperCamelCase : Dict = False
__UpperCamelCase : Tuple = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = BeitModelTester(self )
UpperCamelCase : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[Any] = [*signature.parameters.keys()]
UpperCamelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase : Dict = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase : int = False
UpperCamelCase : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase : str = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase : Optional[int] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Any = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Union[str, Any] = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def a ( ):
"""simple docstring"""
UpperCamelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.default_image_processor
UpperCamelCase : Any = prepare_img()
UpperCamelCase : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
UpperCamelCase : Tuple = torch.ones((1, 196) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : List[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = outputs.logits
# verify the logits
UpperCamelCase : int = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-2 ) )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = self.default_image_processor
UpperCamelCase : List[str] = prepare_img()
UpperCamelCase : List[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : Dict = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = outputs.logits
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
UpperCamelCase : Union[str, Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.default_image_processor
UpperCamelCase : Dict = prepare_img()
UpperCamelCase : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : Any = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = outputs.logits
# verify the logits
UpperCamelCase : Union[str, Any] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
UpperCamelCase : List[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
UpperCamelCase : List[str] = model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=640 , do_center_crop=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCamelCase : Any = Image.open(ds[0]['''file'''] )
UpperCamelCase : List[str] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = outputs.logits
# verify the logits
UpperCamelCase : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
UpperCamelCase : Optional[Any] = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
UpperCamelCase : Any = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
UpperCamelCase : str = model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=640 , do_center_crop=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCamelCase : Dict = Image.open(ds[0]['''file'''] )
UpperCamelCase : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = outputs.logits.detach().cpu()
UpperCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] )
UpperCamelCase : Tuple = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
| 643
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__UpperCAmelCase : List[Any] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__UpperCAmelCase : List[str] = "UperNetConfig"
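# Convolution + batch norm + ReLU block reused throughout the UperNet decode heads.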
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , ):
"""simple docstring"""
super().__init__()
UpperCamelCase : str = nn.Convad(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : int = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.ReLU()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.conv(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.batch_norm(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = self.activation(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[Any] = [
nn.AdaptiveAvgPoolad(__SCREAMING_SNAKE_CASE ),
UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = input
for layer in self.layers:
UpperCamelCase : int = layer(__SCREAMING_SNAKE_CASE )
return hidden_state
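# Pyramid Pooling Module (PSP): pools the last feature map at several scales and
# upsamples each pooled map back to the input resolution before concatenation.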
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[Any] = pool_scales
UpperCamelCase : Dict = align_corners
UpperCamelCase : Optional[int] = in_channels
UpperCamelCase : Union[str, Any] = channels
UpperCamelCase : List[str] = []
for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE )
self.blocks.append(__SCREAMING_SNAKE_CASE )
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = []
for ppm in self.blocks:
UpperCamelCase : List[str] = ppm(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__SCREAMING_SNAKE_CASE )
return ppm_outs
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : int = config
UpperCamelCase : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCamelCase : Optional[int] = in_channels
UpperCamelCase : str = config.hidden_size
UpperCamelCase : str = False
UpperCamelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCamelCase : Optional[int] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCamelCase : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCamelCase : Union[str, Any] = nn.ModuleList()
UpperCamelCase : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCamelCase : List[Any] = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
UpperCamelCase : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__SCREAMING_SNAKE_CASE )
self.fpn_convs.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _lowercase ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = inputs[-1]
UpperCamelCase : int = [x]
psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Any = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE )
return output
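    # FPN-style decode: lateral convs per backbone stage, top-down upsampling with addition,
    # per-level 3x3 convs, then a bottleneck conv and a 1x1 classifier over the fused features.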
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) )
# build top-down path
UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase : Optional[int] = laterals[i - 1].shape[2:]
UpperCamelCase : Optional[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
UpperCamelCase : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase : int = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
UpperCamelCase : str = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase : Tuple = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Dict = config
UpperCamelCase : Optional[Any] = config.auxiliary_in_channels
UpperCamelCase : Union[str, Any] = config.auxiliary_channels
UpperCamelCase : Union[str, Any] = config.auxiliary_num_convs
UpperCamelCase : Optional[Any] = config.auxiliary_concat_input
UpperCamelCase : List[str] = in_index
UpperCamelCase : Any = (kernel_size // 2) * dilation
UpperCamelCase : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
UpperCamelCase : str = nn.Identity()
else:
UpperCamelCase : Dict = nn.Sequential(*__SCREAMING_SNAKE_CASE )
if self.concat_input:
UpperCamelCase : Union[str, Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
UpperCamelCase : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def _lowercase ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = encoder_hidden_states[self.in_index]
UpperCamelCase : str = self.convs(__SCREAMING_SNAKE_CASE )
if self.concat_input:
UpperCamelCase : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = UperNetConfig
__UpperCamelCase : Optional[int] = "pixel_values"
__UpperCamelCase : Dict = True
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowercase ( self ):
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = value
__UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def _lowercase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCamelCase : Tuple = self.backbone.forward_with_filtered_kwargs(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = outputs.feature_maps
UpperCamelCase : Union[str, Any] = self.decode_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = None
if self.auxiliary_head is not None:
UpperCamelCase : int = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCamelCase : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCamelCase : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCamelCase : Optional[Any] = (logits,) + outputs[1:]
else:
UpperCamelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 643
| 1
|
import string
from math import logaa
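# Term frequency: case-insensitive raw count of `term` in `document`, after stripping punctuation and newlines.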
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : int = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
UpperCamelCase : Optional[int] = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
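# Document frequency: returns (number of documents in `corpus` containing `term`, total document count),
# where documents are newline-separated.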
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Dict = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCamelCase : Optional[int] = corpus_without_punctuation.split('''\n''' )
UpperCamelCase : Dict = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE_ ))
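# Inverse document frequency, rounded to 3 decimals: log10(n / df), or 1 + log10(n / (1 + df)) with smoothing.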
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return round(tf * idf , 3 )
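# Example (hypothetical values): tf=3, df=2, n=10 -> idf = log10(10/2) ≈ 0.699 and tf-idf ≈ 2.097.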
| 643
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase : Optional[int] = 500000
__UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__)
__UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
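# `get_duration` (imported from utils) is assumed to return the wall-clock time of the wrapped call;
# each benchmark below therefore yields its elapsed time.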
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ )
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
UpperCamelCase : List[str] = generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ )
def tokenize(SCREAMING_SNAKE_CASE_ : Dict ):
return tokenizer(examples['''text'''] )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''numpy''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''pandas''' ):
UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
    a()  # i.e. benchmark_map_filter; the function was renamed to `a` above
| 643
| 1
|
def a ( SCREAMING_SNAKE_CASE_ : Dict ): # noqa: E741
"""simple docstring"""
UpperCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = 0
UpperCamelCase : Dict = [0] * n
UpperCamelCase : List[str] = [False] * n
UpperCamelCase : List[Any] = [False] * n
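    # Tarjan-style DFS: low[v] is the lowest vertex reachable from v's subtree via back edges.
    # A non-root vertex is an articulation point if some child subtree has no back edge above it;
    # the root is one if it has more than one DFS child (tracked via out_edge_count).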
def dfs(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any ):
if parent == root:
out_edge_count += 1
UpperCamelCase : str = True
UpperCamelCase : Union[str, Any] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
UpperCamelCase : List[str] = dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
UpperCamelCase : List[str] = True
# AP found via cycle
if at == low[to]:
UpperCamelCase : Optional[int] = True
else:
UpperCamelCase : Any = min(low[at] , SCREAMING_SNAKE_CASE_ )
return out_edge_count
for i in range(SCREAMING_SNAKE_CASE_ ):
if not visited[i]:
UpperCamelCase : Dict = 0
UpperCamelCase : Any = dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , -1 , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = out_edge_count > 1
for x in range(len(SCREAMING_SNAKE_CASE_ ) ):
if is_art[x] is True:
print(SCREAMING_SNAKE_CASE_ )
# Adjacency list of graph
__UpperCAmelCase : int = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
a(__UpperCAmelCase)  # compute articulation points of the adjacency list defined above
| 643
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 643
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase : int = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
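# Prompt helper: keeps asking until the reply converts cleanly; an empty reply returns the default.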
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : List[Any]=None ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = True
while ask_again:
UpperCamelCase : Dict = input(SCREAMING_SNAKE_CASE_ )
try:
if default is not None and len(SCREAMING_SNAKE_CASE_ ) == 0:
return default
return convert_value(SCREAMING_SNAKE_CASE_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any]=[] , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ):
"""simple docstring"""
UpperCamelCase : int = BulletMenu(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = menu.run(default_choice=SCREAMING_SNAKE_CASE_ )
return convert_value(SCREAMING_SNAKE_CASE_ ) if convert_value is not None else result
def _convert_compute_environment(value):
    """simple docstring"""
    value = int(value)
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])
def _convert_distributed_mode(value):
    """simple docstring"""
    value = int(value)
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])
def _convert_dynamo_backend(value):
    """simple docstring"""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    """simple docstring"""
    value = int(value)
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])
def _convert_sagemaker_distributed_mode(value):
    """simple docstring"""
    value = int(value)
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def _convert_yes_no_to_bool(value):
    """simple docstring"""
    return {"yes": True, "no": False}[value.lower()]
class UpperCAmelCase_ ( argparse.RawDescriptionHelpFormatter):
'''simple docstring'''
    def _format_usage(self, usage, actions, groups, prefix):
        """simple docstring"""
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('''<command> [<args>] ''', '''''')
        return usage
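# Hedged usage sketch of the helpers above (prompt text and prog name are illustrative):
# mixed_precision = _ask_options(
#     "Do you wish to use mixed precision?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision
# )
# parser = argparse.ArgumentParser("accelerate", formatter_class=UpperCAmelCase_)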
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = W_supports['''sizes'''].tolist()
UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item()
UpperCamelCase : Any = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id
UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase : Optional[int] = 0
else:
UpperCamelCase : Tuple = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : str = p_start
UpperCamelCase : Optional[int] = p_end
return p_starts, p_ends
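# Hedged usage sketch: W_query / W_supports are dicts of batched tokenizer outputs;
# W_supports additionally carries "sizes", "start_token_id" and "end_token_id" entries
# (the upstream project builds these with its own tokenizer utilities, not shown here):
# p_starts, p_ends = model(W_query, W_supports)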
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file, '''r''', encoding='''utf-8''') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('''\n''')
        vocab[token] = index
    return vocab
class UpperCAmelCase_ ( PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
UpperCamelCase : Dict = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
UpperCamelCase : Any = f"""[unused{i}]"""
UpperCamelCase : Dict = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
UpperCamelCase : Tuple = 12
UpperCamelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__SCREAMING_SNAKE_CASE )
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + self.fairseq_offset
    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
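# Hedged sanity-check sketch of the fairseq/spm offset handling above (the vocab path
# is illustrative):
# tok = UpperCAmelCase_("prophetnet.tokenizer")
# assert tok.convert_tokens_to_ids("[PAD]") == 0      # special token, fixed id
# piece_id = tok.convert_tokens_to_ids("▁de")         # spm id shifted by fairseq_offset (12)
# assert tok.convert_ids_to_tokens(piece_id) == "▁de"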
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], expected_token_type_ids)
@slow
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''', '''multi-sequence build''', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''')
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
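# Note: the "\u0120" character (Ġ) in the fixtures above is the byte-level BPE marker
# GPT-2-style tokenizers use to encode a leading space before a word.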
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        """simple docstring"""
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
    def create_and_check_model(self, config, input_values, labels):
        """simple docstring"""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def _lowercase ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def _lowercase ( self ):
"""simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self ):
"""simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
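# Hedged usage sketch outside the test harness (the task name is the standard
# transformers pipeline task; the checkpoint is the same one exercised above):
# from transformers import pipeline
# classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
# preds = classifier("sample_audio.flac")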
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( TestCase):
'''simple docstring'''
    def _no_encoding_on_file_open(self, filepath):
        """simple docstring"""
        with open(filepath, encoding='''utf-8''') as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath):
        """simple docstring"""
        with open(filepath, encoding='''utf-8''') as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        """simple docstring"""
        dataset_paths = Path('''./datasets''')
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py'''))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements(self):
        """simple docstring"""
        dataset_paths = Path('''./datasets''')
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py'''))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCAmelCase_ ( TrainingArguments):
'''simple docstring'''
    sortish_sampler : bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate : bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length : Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams : Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )
    def to_dict(self):
        """simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
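# Hedged usage sketch (field values are illustrative):
# args = UpperCAmelCase_(output_dir="out", predict_with_generate=True,
#                        generation_config=GenerationConfig(num_beams=4))
# assert args.to_dict()["generation_config"]["num_beams"] == 4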
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    '''simple docstring'''
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        """simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowercase ( self ):
"""simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
    def _lowercase ( self , verify_outputs=True ):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def _lowercase ( self ):
        """simple docstring"""
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
        tf.random.set_seed(0)
        tokenized = tokenizer('''Today is a nice day and''', return_tensors='''tf''')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0'''):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
def _lowercase ( self ):
"""simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
        tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
        tokenizer.padding_side = '''left'''
# use different length sentences to test batching
        sentences = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
        inputs = tokenizer(sentences, return_tensors='''tf''', padding=True)
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['''attention_mask'''], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='''tf''').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='''tf''').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
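# Why left padding matters here: decoder-only models predict the next token from the
# last position, so pad tokens must sit on the left or they would displace the final
# token of the prompt and change the generated continuation.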
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
'''simple docstring'''
__UpperCamelCase : torch.Tensor # [batch_size x 3]
__UpperCamelCase : torch.Tensor # [batch_size x 3]
__UpperCamelCase : torch.Tensor # [batch_size x 3]
__UpperCamelCase : torch.Tensor # [batch_size x 3]
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : float
__UpperCamelCase : float
__UpperCamelCase : Tuple[int]
    def __post_init__(self):
        """simple docstring"""
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        """simple docstring"""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        """simple docstring"""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self):
        """simple docstring"""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='''trunc'''),
            ], axis=1, )
        return coords
@property
    def camera_rays(self):
        """simple docstring"""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords):
        """simple docstring"""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ], dim=2, )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width, height):
        """simple docstring"""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, )
def create_pan_cameras(size):
    """simple docstring"""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
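# Hedged sanity check for the factory above (the size value is illustrative):
# cam = create_pan_cameras(64)
# assert cam.origin.shape == (20, 3)   # 20 poses spaced around a circle
# assert cam.shape == (1, 20) and cam.width == cam.height == 64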
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens : List[int] = []
    suffix_tokens : List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self):
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang):
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens(self, lang):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
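# Hedged usage sketch (the model id comes from the constants above; the sentence is
# illustrative):
# tok = UpperCAmelCase_.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# The encoded sequence ends with </s> followed by the en_XX language-code token.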
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    '''simple docstring'''
    def process(self, sample: float) -> float:
        """simple docstring"""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int):
    """simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int):
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('''Gain (dB)''')
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int):
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('''Frequency (Hz)''')
    plt.xscale('''log''')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('''Phase shift (Radians)''')
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
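# Minimal usage sketch: any object with a `process(sample: float) -> float` method
# satisfies FilterType; an identity filter yields a flat 0 dB magnitude response.
# class IdentityFilter:
#     def process(self, sample: float) -> float:
#         return sample
# show_frequency_response(IdentityFilter(), 48000)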
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
    def _lowercase ( self ):
        """simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        object_detector = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        examples = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 )
UpperCamelCase : Union[str, Any] = len(outputs )
self.assertGreater(n , 0 )
self.assertEqual(
outputs , [
{
'''score''': ANY(float ),
'''label''': ANY(str ),
'''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
}
for i in range(n )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
UpperCamelCase : int = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
UpperCamelCase : Tuple = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = pipeline('''zero-shot-object-detection''' )
UpperCamelCase : Optional[int] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
UpperCamelCase : Optional[int] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = 0.2
UpperCamelCase : Tuple = pipeline('''zero-shot-object-detection''' )
UpperCamelCase : Union[str, Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = 2
UpperCamelCase : Optional[int] = pipeline('''zero-shot-object-detection''' )
UpperCamelCase : str = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
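# Usage sketch (hedged): outside the test harness, the zero-shot object
# detection pipeline exercised above is driven the same way:
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection")
#     predictions = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote", "couch"],
#     )
#     for pred in predictions:
#         print(f'{pred["label"]}: {pred["score"]:.2f} at {pred["box"]}')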
| 643
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(log_spec , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(attention_mask , np.int32 )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
normed_slice[length:] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(raw_speech , dtype=np.float32 )
elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
padded_inputs['''input_features'''] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
padded_inputs['''input_features'''] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
if isinstance(input_features[0] , list ):
padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
else:
padded_inputs['''input_features'''] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
padded_inputs['''attention_mask'''] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(return_tensors )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
output['''feature_extractor_type'''] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
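# Usage sketch (hedged): this extractor mirrors the Whisper feature extractor,
# turning raw mono audio into fixed-size log-mel features (80 mel bins x 3000
# frames at the defaults above). Assuming the class is exposed as
# `WhisperFeatureExtractor`, a call looks like:
#
#     import numpy as np
#
#     extractor = WhisperFeatureExtractor()       # 80 mels, 16 kHz, 30 s chunks
#     audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     features = extractor(audio, sampling_rate=16_000, return_tensors="np")
#     print(features["input_features"].shape)     # (1, 80, 3000)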
| 643
| 1
|
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCamelCase : Optional[Any] = ''''''
UpperCamelCase : Tuple = ''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(SCREAMING_SNAKE_CASE_ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previously found palindromic
# substring that extends furthest to the right
UpperCamelCase , UpperCamelCase : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCamelCase : int = [1 for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
# for each character in new_string find corresponding palindromic string
UpperCamelCase : Dict = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(SCREAMING_SNAKE_CASE_ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCamelCase : List[str] = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
UpperCamelCase : Tuple = j - k + 1 # noqa: E741
UpperCamelCase : List[Any] = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCamelCase : List[str] = length[j]
UpperCamelCase : List[str] = j
# create that string
UpperCamelCase : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
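# Worked example (hedged): the function above implements Manacher's algorithm,
# which finds the longest palindromic substring in O(n) by mirroring previously
# computed palindrome lengths. Assuming it is exposed as
# `longest_palindromic_substring` (the name is obfuscated as `a` above):
#
#     print(longest_palindromic_substring("abracadabra"))  # aca
#     print(longest_palindromic_substring("ababa"))        # ababa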
| 643
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Dict = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__UpperCAmelCase : Tuple = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__UpperCAmelCase : Any = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , do_lower_case ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , strip_accents ) != strip_accents
):
UpperCamelCase : List[Any] = getattr(normalizers , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Optional[int] = do_lower_case
UpperCamelCase : Optional[Any] = strip_accents
UpperCamelCase : List[Any] = pre_tok_class(**pre_tok_state )
UpperCamelCase : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.__dict__.copy()
UpperCamelCase : Any = BertPreTokenizer()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Any = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Any = BertPreTokenizer()
return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
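# Usage sketch (hedged): the fast RoFormer tokenizer above swaps in a
# jieba-based pre-tokenizer for Chinese text and temporarily restores BERT's
# pre-tokenizer when pickling or saving (custom pre-tokenizers cannot be
# serialized). A typical round trip, assuming the checkpoint is reachable:
#
#     from transformers import RoFormerTokenizerFast
#
#     tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     ids = tok("今天天气非常好。")["input_ids"]  # [CLS] ... [SEP]
#     print(tok.decode(ids))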
| 643
| 1
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=[8, 16, 32, 64] , __SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE=[2, 3, 4] , __SCREAMING_SNAKE_CASE=1 , ):
"""simple docstring"""
UpperCamelCase : str = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Tuple = image_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : Union[str, Any] = embeddings_size
UpperCamelCase : Dict = hidden_sizes
UpperCamelCase : Any = depths
UpperCamelCase : int = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : Any = hidden_act
UpperCamelCase : Optional[Any] = num_labels
UpperCamelCase : Any = scope
UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = out_features
UpperCamelCase : str = out_indices
UpperCamelCase : List[str] = num_groups
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Optional[int] = None
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = BitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = self.num_labels
UpperCamelCase : Any = BitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = BitBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : int = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase : List[str] = None
UpperCamelCase : str = BitBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Any = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = config_and_inputs
UpperCamelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__UpperCamelCase : Optional[int] = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : int = False
__UpperCamelCase : Dict = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : List[str] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = BitModelTester(self )
UpperCamelCase : Tuple = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
def _lowercase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Any = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Dict = [*signature.parameters.keys()]
UpperCamelCase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[str] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def _lowercase ( self ):
"""simple docstring"""
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : Any = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[Any] = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase : List[Any] = layer_type
UpperCamelCase : Tuple = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase : int = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = BitModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
UpperCamelCase : Union[str, Any] = self.default_image_processor
UpperCamelCase : str = prepare_img()
UpperCamelCase : Optional[int] = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
UpperCamelCase : List[str] = model(**inputs )
# verify the logits
UpperCamelCase : Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , expected_shape )
UpperCamelCase : int = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : List[str] = (BitBackbone,) if is_torch_available() else ()
__UpperCamelCase : Optional[Any] = BitConfig
__UpperCamelCase : Optional[Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = BitModelTester(self )
| 643
|
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : list[int] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return array
UpperCamelCase , UpperCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
# Compute the variables
UpperCamelCase : Union[str, Any] = _max - _min + 1
UpperCamelCase , UpperCamelCase : Optional[Any] = [0] * holes_range, [0] * holes_range
# Do the counting: drop each value into its hole.
for i in array:
UpperCamelCase : Optional[int] = i - _min
holes[index] = i
holes_repeat[index] += 1
# Rebuild the sorted array by reading the holes back in order.
UpperCamelCase : str = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
while holes_repeat[i] > 0:
array[index] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
__UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
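# Worked example (hedged): pigeonhole sort runs in O(n + range) and only suits
# integers with a small spread of values. Tracing [8, 3, 2, 7, 4]: _min=2 and
# _max=8, so 7 holes are allocated; each value lands in hole (value - _min),
# and reading the holes back in order yields the sorted array:
#
#     print(pigeon_sort([8, 3, 2, 7, 4]))  # [2, 3, 4, 7, 8]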
| 643
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
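# Usage sketch (hedged): the config above parameterizes YOLOS, a plain ViT that
# performs detection through extra learnable "detection tokens" rather than a
# CNN head. Assuming the class is exposed as `YolosConfig`, a smaller variant
# might be instantiated like this:
#
#     config = YolosConfig(image_size=[224, 224], num_detection_tokens=50)
#     print(config.num_detection_tokens, config.patch_size)  # 50 16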
| 643
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase : Dict = (
Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(path_to_cookiecutter ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(configuration_file )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(configuration_file )
UpperCamelCase : Dict = configuration['''lowercase_modelname''']
UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(model_dir , exist_ok=True )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
UpperCamelCase : Any = f.readlines()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Create temp file
UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp()
UpperCamelCase : Tuple = False
with fdopen(fd , '''w''' ) as new_file:
with open(original_file ) as old_file:
for line in old_file:
new_file.write(line )
if line_to_copy_below in line:
UpperCamelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(line_to_copy )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(original_file , abs_path )
# Remove original file
remove(original_file )
# Move new file
move(abs_path , original_file )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE ) as datafile:
UpperCamelCase : int = []
UpperCamelCase : Dict = False
UpperCamelCase : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : int = skip_units(line )
elif "# Below: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : List[str] = skip_units(line )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
UpperCamelCase : Dict = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(__SCREAMING_SNAKE_CASE )
remove(__SCREAMING_SNAKE_CASE )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(directory )
| 643
| 1
|
import pprint
import requests
__UpperCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def a ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def a ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
__UpperCAmelCase : List[str] = random_quotes()
pprint.pprint(response)
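# Usage sketch (hedged): both helpers return the parsed JSON from zenquotes.io,
# a list with one dict per quote; the field names below ("q" for the quote,
# "a" for the author) are assumptions based on that API's documented schema:
#
#     for quote in random_quotes():
#         print(f'{quote["q"]} -- {quote["a"]}')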
| 643
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : str = cv2.getAffineTransform(pt1 , pt2 )
return cv2.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
__UpperCAmelCase : Tuple = cv2.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
__UpperCAmelCase : int = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
__UpperCAmelCase , __UpperCAmelCase : Tuple = gray_img.shape
# set different points to rotate image
__UpperCAmelCase : Optional[int] = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
__UpperCAmelCase : Optional[int] = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
__UpperCAmelCase : Any = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
__UpperCAmelCase : int = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
__UpperCAmelCase : Union[str, Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__UpperCAmelCase : List[str] = plt.figure(1)
__UpperCAmelCase : Dict = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
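# Design note (hedged): cv2.getAffineTransform solves for the 2x3 matrix that
# maps three source points onto three destination points, so the "rotations"
# above are really general affine warps (rotation plus scale and shear). For a
# pure rotation about the image centre, cv2.getRotationMatrix2D is the usual
# choice (reusing gray_img, img_rows and img_cols from the script above):
#
#     matrix = cv2.getRotationMatrix2D((img_cols / 2, img_rows / 2), 45, 1.0)
#     rotated = cv2.warpAffine(gray_img, matrix, (img_cols, img_rows))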
| 643
| 1
|
import os
import string
import sys
__UpperCAmelCase : Optional[Any] = 1 << 8
__UpperCAmelCase : List[Any] = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
__UpperCAmelCase : Dict = KEYMAP["up"]
__UpperCAmelCase : Union[str, Any] = KEYMAP["left"]
if sys.platform == "win32":
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[str] = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
KEYMAP[str(i)] = ord(str(i))
def a ( ):
"""simple docstring"""
if os.name == "nt":
import msvcrt
UpperCamelCase : Optional[Any] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
UpperCamelCase : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
UpperCamelCase : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
UpperCamelCase : List[Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(chx )
if ord(chx ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
UpperCamelCase : str = chr(KEYMAP['''esc'''] )
except KeyError:
UpperCamelCase : List[str] = cha[1]
else:
UpperCamelCase : Dict = ch.decode(encoding )
else:
UpperCamelCase : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
UpperCamelCase : str = sys.stdin.fileno()
UpperCamelCase : List[str] = termios.tcgetattr(fd )
try:
tty.setraw(fd )
UpperCamelCase : int = sys.stdin.read(1 )
finally:
termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
return ch
def a ( ):
"""simple docstring"""
UpperCamelCase : List[Any] = get_raw_chars()
if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(char ) == KEYMAP["esc"]:
UpperCamelCase : int = get_raw_chars()
if ord(combo ) == KEYMAP["mod_int"]:
UpperCamelCase : str = get_raw_chars()
if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(key ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 643
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(backbone_config , dict ):
UpperCamelCase : Tuple = backbone_config.get('''model_type''' )
UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase : Any = config_class.from_dict(backbone_config )
UpperCamelCase : List[str] = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
UpperCamelCase : Tuple = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Union[str, Any] = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    @property
    def num_attention_heads(self):
        """Alias for `encoder_attention_heads`, kept for API consistency."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        """Alias for `d_model`, kept for API consistency."""
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class UpperCAmelCase_(_a):
    """ONNX export configuration for the detection model above."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
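# Hedged usage sketch: the parameter list above lines up with `ConditionalDetrConfig`
# from `transformers` (an assumption inferred from the defaults, not stated in this file).
from transformers import ConditionalDetrConfig

demo_config = ConditionalDetrConfig(num_queries=100, auxiliary_loss=True)
assert demo_config.num_attention_heads == demo_config.encoder_attention_heads  # property alias
restored = ConditionalDetrConfig.from_dict(demo_config.to_dict())              # round-trips cleanly
assert restored.num_queries == 100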
| 643
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
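# Illustrative sketch of the lazy loading wired up above (assumes `transformers` is
# installed): heavyweight submodules are imported only on first attribute access.
import transformers.models.convbert as convbert

model_cls = convbert.ConvBertModel  # this attribute access triggers the real modeling import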
| 643
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # This class name is tied to Yahoo's current markup and may change without notice.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
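# A hedged convenience wrapper (illustrative, not part of the original script):
# `soup.find` returns None when Yahoo's markup changes, which raises AttributeError.
def safe_stock_price(symbol: str = "AAPL") -> str:
    try:
        return stock_price(symbol)
    except AttributeError:
        return "N/A"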
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast MBart tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Wrap the ids with the MBart prefix/suffix tokens (eos + language code)."""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        """MBart does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
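# Hedged usage sketch (downloads the vocab from the Hub, so left commented out):
#
#     tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#     tok.src_lang = "en_XX"
#     batch = tok(["UN Chief says there is no military solution in Syria"], return_tensors="pt")
#     # input_ids end with [..., eos, en_XX code] because of the suffix tokens set above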
| 643
|
def twos_complement(number: int) -> str:
    """
    Return the two's complement of a negative integer as a binary string.

    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(-17)
    '0b101111'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 1
|
from ..utils import DummyObject, requires_backends


# Each class below is a dummy placeholder: constructing it, or calling `from_config` /
# `from_pretrained`, raises an informative error until the torch, transformers and onnx
# backends are installed. (Each placeholder stands in for a distinct pipeline class in
# the original library.)
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
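# Minimal sketch of the metaclass pattern behind these dummies (illustrative; the real
# `DummyObject` and `requires_backends` live in the library's utils module):
class _DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends: torch, transformers, onnx")


class _ExamplePipeline(metaclass=_DummyMeta):
    pass  # any class-attribute access now raises the informative ImportError above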
| 643
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration class for the YOLOS object-detection model."""

    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
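# Hedged usage sketch (assumes the standard `OnnxConfig(config)` constructor from
# `transformers`; instantiation details may differ by version):
config = YolosConfig()
onnx_config = YolosOnnxConfig(config)
print(list(onnx_config.inputs))        # ["pixel_values"] with dynamic batch/size axes
print(onnx_config.default_onnx_opset)  # 12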
| 643
| 1
|