code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
def __UpperCamelCase ( _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = len(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
for j in range(i + 1 , _lowerCAmelCase ):
if numbers[j] < numbers[i]:
A , A : Dict = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE_:Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 116
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Dict = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "camembert"
def __init__( self, lowerCamelCase__=3_0522, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : List[Any] = vocab_size
A : Dict = hidden_size
A : str = num_hidden_layers
A : List[Any] = num_attention_heads
A : List[str] = hidden_act
A : Tuple = intermediate_size
A : Tuple = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : Tuple = type_vocab_size
A : List[Any] = initializer_range
A : str = layer_norm_eps
A : Tuple = position_embedding_type
A : str = use_cache
A : Any = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
if self.task == "multiple-choice":
A : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 116
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ ={
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ =[
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
UpperCamelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 128
|
"""simple docstring"""
def a_ ( _lowercase , _lowercase ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
_UpperCamelCase : Optional[int] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_lowercase ) )
return round(_lowercase , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowercase : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__ ):
def __init__( self : Optional[Any] , *_lowercase : Optional[Any] , **_lowercase : int ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 332
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
| 325
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
__A = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
__A = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Any = BartTokenizer
def __init__( self : List[str] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]="replace" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Tuple=True , **UpperCamelCase__ : Optional[Any] , )-> int:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
__lowerCAmelCase: Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space:
__lowerCAmelCase: Any = getattr(UpperCamelCase__ , pre_tok_state.pop("type"))
__lowerCAmelCase: Any = add_prefix_space
__lowerCAmelCase: Union[str, Any] = pre_tok_class(**UpperCamelCase__)
__lowerCAmelCase: Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCAmelCase: Tuple = "post_processor"
__lowerCAmelCase: str = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__)
if tokenizer_component_instance:
__lowerCAmelCase: Tuple = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase: Dict = tuple(state["sep"])
if "cls" in state:
__lowerCAmelCase: Union[str, Any] = tuple(state["cls"])
__lowerCAmelCase: Optional[Any] = False
if state.get("add_prefix_space" , UpperCamelCase__) != add_prefix_space:
__lowerCAmelCase: Dict = add_prefix_space
__lowerCAmelCase: Optional[int] = True
if state.get("trim_offsets" , UpperCamelCase__) != trim_offsets:
__lowerCAmelCase: int = trim_offsets
__lowerCAmelCase: Dict = True
if changes_to_apply:
__lowerCAmelCase: int = getattr(UpperCamelCase__ , state.pop("type"))
__lowerCAmelCase: List[str] = component_class(**UpperCamelCase__)
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__)
@property
def lowercase_ ( self : List[Any])-> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def lowercase_ ( self : Any , UpperCamelCase__ : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else value
__lowerCAmelCase: Union[str, Any] = value
def lowercase_ ( self : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any)-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: List[str] = kwargs.get("is_split_into_words" , UpperCamelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs.")
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int])-> BatchEncoding:
'''simple docstring'''
__lowerCAmelCase: List[str] = kwargs.get("is_split_into_words" , UpperCamelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs.")
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__)
return tuple(UpperCamelCase__)
def lowercase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=None)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None)-> List[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = [self.sep_token_id]
__lowerCAmelCase: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 108
|
"""simple docstring"""
from math import ceil
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase: Tuple = list(range(0 , __SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Optional[Any] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__lowerCAmelCase: List[Any] = []
for i in device_map_blocks:
if device_map_blocks.count(__SCREAMING_SNAKE_CASE ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__SCREAMING_SNAKE_CASE )
# Missing blocks
__lowerCAmelCase: Optional[Any] = [i for i in blocks if i not in device_map_blocks]
__lowerCAmelCase: List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(__SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(__SCREAMING_SNAKE_CASE ) )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase: List[Any] = list(range(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Dict = int(ceil(n_layers / len(__SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase: Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )]
return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
| 108
| 1
|
def A_ ( A__ = 1000 ) -> int:
return sum(e for e in range(3 , A__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 99
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : Any = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'roberta'
def __init__( self , __lowerCamelCase=5_0_2_6_5 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-12 , __lowerCamelCase=1 , __lowerCamelCase=0 , __lowerCamelCase=2 , __lowerCamelCase="absolute" , __lowerCamelCase=True , __lowerCamelCase=None , **__lowerCamelCase , ) -> List[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_size
_SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : int = intermediate_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Any = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Any = initializer_range
_SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
_SCREAMING_SNAKE_CASE : str = use_cache
_SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 325
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase__ )
class _UpperCamelCase :
'''simple docstring'''
_A : str
_A : str
_A : Optional[str] = None
_A : Optional[str] = None
_A : Optional[str] = None
@dataclass(frozen=lowerCamelCase__ )
class _UpperCamelCase :
'''simple docstring'''
_A : List[int]
_A : Optional[List[int]] = None
_A : Optional[List[int]] = None
_A : Optional[Union[int, float]] = None
_A : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A : List[InputFeatures]
def __init__( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : int=False , lowerCAmelCase__ : bool = False , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = hans_processors[task]()
__SCREAMING_SNAKE_CASE : Dict = os.path.join(
lowerCAmelCase__ , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(lowerCAmelCase__ ) , lowerCAmelCase__ , ) , )
__SCREAMING_SNAKE_CASE : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = label_list[2], label_list[1]
__SCREAMING_SNAKE_CASE : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__SCREAMING_SNAKE_CASE : Optional[int] = cached_features_file + """.lock"""
with FileLock(lowerCAmelCase__ ):
if os.path.exists(lowerCAmelCase__ ) and not overwrite_cache:
logger.info(F"Loading features from cached file {cached_features_file}" )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.load(lowerCAmelCase__ )
else:
logger.info(F"Creating features from dataset file at {data_dir}" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
processor.get_dev_examples(lowerCAmelCase__ ) if evaluate else processor.get_train_examples(lowerCAmelCase__ )
)
logger.info("""Training examples: %s""" , len(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Tuple = hans_convert_examples_to_features(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
logger.info("""Saving features into cached file %s""" , lowerCAmelCase__ )
torch.save(self.features , lowerCAmelCase__ )
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[Any] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCamelCase :
'''simple docstring'''
_A : List[InputFeatures]
def __init__( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = 1_2_8 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : bool = False , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = hans_processors[task]()
__SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = label_list[2], label_list[1]
__SCREAMING_SNAKE_CASE : List[Any] = label_list
__SCREAMING_SNAKE_CASE : List[str] = processor.get_dev_examples(lowerCAmelCase__ ) if evaluate else processor.get_train_examples(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = hans_convert_examples_to_features(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(lowerCAmelCase__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__SCREAMING_SNAKE_CASE : List[Any] = tf.data.Dataset.from_generator(
lowerCAmelCase__ , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
return self.dataset
def __len__( self : List[Any] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Union[str, Any] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
return self.label_list
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase__ , """heuristics_train_set.txt""" ) ) , """train""" )
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : Any ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase__ , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = []
for i, line in enumerate(lowerCAmelCase__ ):
if i == 0:
continue
__SCREAMING_SNAKE_CASE : Any = """%s-%s""" % (set_type, line[0])
__SCREAMING_SNAKE_CASE : Tuple = line[5]
__SCREAMING_SNAKE_CASE : Dict = line[6]
__SCREAMING_SNAKE_CASE : Tuple = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
__SCREAMING_SNAKE_CASE : Tuple = line[0]
examples.append(InputExample(guid=lowerCAmelCase__ , text_a=lowerCAmelCase__ , text_b=lowerCAmelCase__ , label=lowerCAmelCase__ , pairID=lowerCAmelCase__ ) )
return examples
def lowerCAmelCase_ ( _lowerCamelCase: List[InputExample] , _lowerCamelCase: List[str] , _lowerCamelCase: int , _lowerCamelCase: PreTrainedTokenizer , ):
__SCREAMING_SNAKE_CASE : Dict = {label: i for i, label in enumerate(_lowerCamelCase )}
__SCREAMING_SNAKE_CASE : str = []
for ex_index, example in tqdm.tqdm(enumerate(_lowerCamelCase ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , truncation=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , )
__SCREAMING_SNAKE_CASE : Optional[int] = label_map[example.label] if example.label in label_map else 0
__SCREAMING_SNAKE_CASE : Optional[Any] = int(example.pairID )
features.append(InputFeatures(**_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(F"guid: {example}" )
logger.info(F"features: {features[i]}" )
return features
UpperCamelCase__ : List[str] = {
'''hans''': 3,
}
UpperCamelCase__ : str = {
'''hans''': HansProcessor,
}
| 112
|
'''simple docstring'''
def lowerCAmelCase_ ( _lowerCamelCase: list[int] , _lowerCamelCase: str ):
__SCREAMING_SNAKE_CASE : str = int(_lowerCamelCase )
# Initialize Result
__SCREAMING_SNAKE_CASE : Tuple = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase__ : int = []
UpperCamelCase__ : List[Any] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
UpperCamelCase__ : Tuple = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
UpperCamelCase__ : str = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase__ : List[Any] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase__ : str = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(f"Following is minimal change for {value}: ")
UpperCamelCase__ : int = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 112
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350
|
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =precision
SCREAMING_SNAKE_CASE__ : int =ceil(precision / 1_4 )
SCREAMING_SNAKE_CASE__ : int =4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
SCREAMING_SNAKE_CASE__ : Tuple =1
SCREAMING_SNAKE_CASE__ : Any =1_3_5_9_1_4_0_9
SCREAMING_SNAKE_CASE__ : List[Any] =Decimal(UpperCamelCase__ )
for k in range(1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : str =factorial(6 * k ) // (factorial(3 * k ) * factorial(UpperCamelCase__ ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
a_ = 5_0
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 222
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = filter(lambda __lowerCamelCase : p.requires_grad, model.parameters() )
UpperCAmelCase_ : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_a = logging.getLogger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase ):
if metric == "rouge2":
UpperCAmelCase_ : str = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCAmelCase_ : Optional[Any] = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCAmelCase_ : str = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
UpperCAmelCase_ : int = ModelCheckpoint(
dirpath=__UpperCamelCase, filename=__UpperCamelCase, monitor=f"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def __a ( __lowerCamelCase, __lowerCamelCase ):
return EarlyStopping(
monitor=f"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=__UpperCamelCase, verbose=__UpperCamelCase, )
class A_ (pl.Callback ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=True ):
"""simple docstring"""
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCAmelCase_ : Tuple = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCAmelCase_ : Optional[int] = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : Optional[Any] = od / "test_results.txt"
UpperCAmelCase_ : Any = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : List[str] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCAmelCase_ : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , "a+" ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : Dict = metrics[key]
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCAmelCase_ : Union[str, Any] = val.item()
UpperCAmelCase_ : Optional[int] = F"""{key}: {val:.6f}\n"""
writer.write(_SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : Optional[Any] = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
try:
UpperCAmelCase_ : Optional[int] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Tuple = pl_module.model.num_parameters()
UpperCAmelCase_ : List[Any] = count_trainable_parameters(_SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "test" )
@rank_zero_only
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 61
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 4000000 )-> int:
UpperCamelCase = []
UpperCamelCase ,UpperCamelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = b, a + b
return sum(__UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }')
| 321
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="resnet50" , __lowercase=3 , __lowercase=32 , __lowercase=3 , __lowercase=True , __lowercase=True , ):
__lowerCAmelCase = parent
__lowerCAmelCase = out_indices if out_indices is not None else [4]
__lowerCAmelCase = stage_names
__lowerCAmelCase = out_features
__lowerCAmelCase = backbone
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_pretrained_backbone
__lowerCAmelCase = is_training
def _snake_case (self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def _snake_case (self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = TimmBackbone(config=__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(__lowercase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _snake_case (self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class a__ ( __A , __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
__UpperCamelCase : Optional[int] = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
__UpperCamelCase : str = False
__UpperCamelCase : int = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[int] = False
def _snake_case (self ):
__lowerCAmelCase = TimmBackboneModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def _snake_case (self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case (self ):
__lowerCAmelCase = '''resnet18'''
__lowerCAmelCase = '''microsoft/resnet-18'''
__lowerCAmelCase = AutoBackbone.from_pretrained(__lowercase , use_timm_backbone=__lowercase )
__lowerCAmelCase = AutoBackbone.from_pretrained(__lowercase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCAmelCase = AutoBackbone.from_pretrained(__lowercase , use_timm_backbone=__lowercase , out_indices=[1, 2, 3] )
__lowerCAmelCase = AutoBackbone.from_pretrained(__lowercase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _snake_case (self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case (self ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _snake_case (self ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _snake_case (self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case (self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _snake_case (self ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _snake_case (self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case (self ):
pass
def _snake_case (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__lowercase )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def _snake_case (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase = self.all_model_classes[0]
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
__lowerCAmelCase = self._prepare_for_class(__lowercase , __lowercase )
__lowerCAmelCase = model(**__lowercase )
__lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__lowercase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _snake_case (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(**__lowercase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase = copy.deepcopy(__lowercase )
__lowerCAmelCase = None
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(**__lowercase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase = copy.deepcopy(__lowercase )
__lowerCAmelCase = False
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(**__lowercase )
| 9
|
'''simple docstring'''
from math import sqrt
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' must been an int and positive"
__lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCAmelCase = False
for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCAmelCase = False
break
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool"
return status
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCAmelCase = list(range(2, n + 1))
__lowerCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase)):
for j in range(i + 1, len(lowerCamelCase)):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCAmelCase = 0
# filters actual prime numbers.
__lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2"
__lowerCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1):
if is_prime(lowerCamelCase):
ans.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0"
__lowerCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
__lowerCAmelCase = 2
__lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(lowerCamelCase)
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase):
while quotient != 1:
if is_prime(lowerCamelCase) and (quotient % factor == 0):
ans.append(lowerCamelCase)
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = max(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = min(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int"
assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool"
return number % 2 == 0
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int"
assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool"
return number % 2 != 0
def __magic_name__( lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase)
), "'number' must been an int, even and > 2"
__lowerCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase = get_prime_numbers(lowerCamelCase)
__lowerCAmelCase = len(lowerCamelCase)
# run variable for while-loops.
__lowerCAmelCase = 0
__lowerCAmelCase = None
# exit variable. for break up the loops
__lowerCAmelCase = True
while i < len_pn and loop:
__lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase = False
ans.append(prime_numbers[i])
ans.append(prime_numbers[j])
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (len(lowerCamelCase) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0])
and is_prime(ans[1])
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 0
while numbera != 0:
__lowerCAmelCase = numbera % numbera
__lowerCAmelCase = numbera
__lowerCAmelCase = rest
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = prime_factorization(lowerCamelCase)
elif numbera == 1 or numbera == 1:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = max(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(max(lowerCamelCase, lowerCamelCase)):
ans *= n
else:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(lowerCamelCase):
ans *= n
done.append(lowerCamelCase)
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(lowerCamelCase):
ans *= n
done.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase = 0
__lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase):
ans += 1
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime(
lowerCamelCase), "'ans' must been a prime number and from type int"
return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase = p_number_a + 1 # jump to the next number
__lowerCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase)
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase):
number += 1
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and ans[0] != p_number_a
and ans[len(lowerCamelCase) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1, n + 1):
if n % divisor == 0:
ans.append(lowerCamelCase)
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCAmelCase = get_divisors(lowerCamelCase)
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1]) == number
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase))
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase = 1 # this will be return.
for factor in range(1, n + 1):
ans *= factor
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1 # this will be return
for _ in range(n - 1):
__lowerCAmelCase = ans
ans += fiba
__lowerCAmelCase = tmp
return ans
| 9
| 1
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : List[str] = LongformerTokenizer
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : List[str] = LongformerTokenizerFast
lowerCAmelCase_ : Any = True
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ = dict(zip(a__ , range(len(a__ ) ) ) )
snake_case_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ = {"unk_token": "<unk>"}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a__ ) )
def lowerCAmelCase__ ( self , **a__ ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def lowerCAmelCase__ ( self , **a__ ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = "lower newer"
snake_case_ = "lower newer"
return input_text, output_text
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ = "lower newer"
snake_case_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ = tokenizer.tokenize(a__ ) # , add_prefix_space=True)
self.assertListEqual(a__ , a__ )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=a__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=a__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=a__ )
snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=a__ )
snake_case_ = tokenizer.encode(
"sequence builders" , add_special_tokens=a__ , add_prefix_space=a__ )
snake_case_ = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=a__ , add_prefix_space=a__ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(a__ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.get_tokenizer()
snake_case_ = "Encode this sequence."
snake_case_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a__ , a__ )
snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a__ , a__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a__ , a__ )
# Testing spaces after special tokens
snake_case_ = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(a__ , lstrip=a__ , rstrip=a__ )} ) # mask token has a left space
snake_case_ = tokenizer.convert_tokens_to_ids(a__ )
snake_case_ = "Encode <mask> sequence"
snake_case_ = "Encode <mask>sequence"
snake_case_ = tokenizer.encode(a__ )
snake_case_ = encoded.index(a__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a__ , a__ )
snake_case_ = tokenizer.encode(a__ )
snake_case_ = encoded.index(a__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a__ , a__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
snake_case_ = self.tokenizer_class.from_pretrained(a__ , **a__ )
snake_case_ = "A, <mask> AllenNLP sentence."
snake_case_ = tokenizer_r.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
snake_case_ = tokenizer_p.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
a__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , a__ )
self.assertEqual(post_processor_state["add_prefix_space"] , a__ )
self.assertEqual(post_processor_state["trim_offsets"] , a__ )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ = F'{text_of_1_token} {text_of_1_token}'
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , )
snake_case_ = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ) + 1, 1 + len(a__ ) + 1 + len(a__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ )
snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
| 85
|
'''simple docstring'''
from __future__ import annotations
import requests
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case ).json()
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
snake_case_ = requests.get(snake_case ).json()[:max_stories]
return [get_hackernews_story(snake_case ) for story_id in story_ids]
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = hackernews_top_stories(snake_case )
return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> list[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =0
UpperCAmelCase : str =len(__lowerCAmelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCAmelCase : int =i + 1
else:
UpperCAmelCase : List[str] =j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
| 78
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __snake_case ( lowerCamelCase__ ):
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
UpperCAmelCase : Tuple ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
UpperCAmelCase : int ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
UpperCAmelCase : Optional[int] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
UpperCAmelCase : List[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
UpperCAmelCase : List[Any] =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase : Optional[Any] ='''1'''
UpperCAmelCase : List[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
UpperCAmelCase : Any ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
UpperCAmelCase : Union[str, Any] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
UpperCAmelCase : Union[str, Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
UpperCAmelCase : Any =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
UpperCAmelCase : List[str] =self.get_env()
UpperCAmelCase : Any =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
UpperCAmelCase : int ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
UpperCAmelCase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
UpperCAmelCase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
UpperCAmelCase : Any =self.get_env()
UpperCAmelCase : List[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
UpperCAmelCase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase : int ='''1'''
UpperCAmelCase : Optional[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict ='''
from transformers import pipeline
'''
UpperCAmelCase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
UpperCAmelCase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
UpperCAmelCase : Optional[int] =self.get_env()
UpperCAmelCase : int ='''1'''
UpperCAmelCase : Optional[int] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
UpperCAmelCase : List[str] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Any ='''
from transformers import AutoModel
'''
UpperCAmelCase : Optional[Any] ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
UpperCAmelCase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
UpperCAmelCase : Optional[int] =self.get_env()
UpperCAmelCase : Optional[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase : Any ='''1'''
UpperCAmelCase : Dict =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 78
| 1
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__lowercase = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__lowercase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _A :
"""simple docstring"""
def __init__( self : Tuple):
a : List[str] = WATERMARK_BITS
a : int = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def __snake_case ( self : int , __UpperCAmelCase : torch.FloatTensor):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
a : Optional[Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a : str = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a : Any = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a : Any = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0)
return images
| 40
|
"""simple docstring"""
from __future__ import annotations
class _A :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : int = 0):
a : Tuple = key
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__UpperCAmelCase) ^ key) for ch in content]
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__UpperCAmelCase) ^ key) for ch in content]
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
a : Any = ""
for ch in content:
ans += chr(ord(__UpperCAmelCase) ^ key)
return ans
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
a : str = ""
for ch in content:
ans += chr(ord(__UpperCAmelCase) ^ key)
return ans
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
try:
with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase))
except OSError:
return False
return True
def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
try:
with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
class A_ ( __lowercase ):
'''simple docstring'''
__snake_case = '''timm_backbone'''
def __init__( self: Union[str, Any] , a: Any=None , a: Dict=3 , a: List[str]=True , a: Optional[Any]=True , a: Tuple=None , **a: Union[str, Any] , ):
super().__init__(**a )
__lowerCamelCase : Any = backbone
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : int = features_only
__lowerCamelCase : int = use_pretrained_backbone
__lowerCamelCase : List[str] = True
__lowerCamelCase : Dict = out_indices if out_indices is not None else (-1,)
| 363
|
from __future__ import annotations
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : List[Any] = str(SCREAMING_SNAKE_CASE__ )
return len(SCREAMING_SNAKE_CASE__ ) == 9 and set(SCREAMING_SNAKE_CASE__ ) == set('123456789' )
def UpperCamelCase__ ( ):
for base_num in range(9_999 , 4_999 , -1 ):
__lowerCamelCase : Tuple = 100_002 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE__ ):
return candidate
for base_num in range(333 , 99 , -1 ):
__lowerCamelCase : Union[str, Any] = 1_002_003 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 194
| 0
|
from string import ascii_uppercase
_lowerCamelCase ={char: i for i, char in enumerate(ascii_uppercase)}
_lowerCamelCase =dict(enumerate(ascii_uppercase))
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[Any] = len(lowerCamelCase )
lowerCamelCase : str = 0
while True:
if x == i:
lowerCamelCase : int = 0
if len(lowerCamelCase ) == len(lowerCamelCase ):
break
key += key[i]
i += 1
return key
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = """"""
lowerCamelCase : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
lowerCamelCase : Dict = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = """"""
lowerCamelCase : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
lowerCamelCase : Union[str, Any] = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _a ( ):
lowerCamelCase : int = """THE GERMAN ATTACK"""
lowerCamelCase : List[Any] = """SECRET"""
lowerCamelCase : Optional[Any] = generate_key(lowerCamelCase, lowerCamelCase )
lowerCamelCase : Union[str, Any] = cipher_text(lowerCamelCase, lowerCamelCase )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(lowerCamelCase, lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 287
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 287
| 1
|
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int ) -> list[int]:
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 351
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3
| 0
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
UpperCAmelCase_ = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
UpperCAmelCase_ = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
UpperCAmelCase_ = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCamelCase__ ( A__ : Dict , A__ : List[Any] ):
'''simple docstring'''
return float((preds == labels).mean() )
def lowerCamelCase__ ( A__ : int , A__ : str ):
'''simple docstring'''
__lowerCamelCase = simple_accuracy(A__ , A__ )
__lowerCamelCase = float(fa_score(y_true=A__ , y_pred=A__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase__ ( A__ : str , A__ : List[str] ):
'''simple docstring'''
__lowerCamelCase = float(pearsonr(A__ , A__ )[0] )
__lowerCamelCase = float(spearmanr(A__ , A__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__( datasets.Metric):
def lowerCAmelCase__ ( self: Dict ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: List[str] ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 12
|
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__snake_case : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def __init__( self : List[str] , **lowerCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : int , lowerCAmelCase_ : Union[np.ndarray, bytes, str] , **lowerCAmelCase_ : Tuple ) -> Dict:
'''simple docstring'''
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] , **lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ : Optional[Any] ={}
if "candidate_labels" in kwargs:
A__ : List[Any] =kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
A__ : Optional[Any] =kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str="This is a sound of {}." ) -> Tuple:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
A__ : Any =requests.get(lowerCAmelCase_ ).content
else:
with open(lowerCAmelCase_ , """rb""" ) as f:
A__ : Dict =f.read()
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : str =ffmpeg_read(lowerCAmelCase_ , self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase_ , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
A__ : int =self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
A__ : Union[str, Any] =candidate_labels
A__ : Tuple =[hypothesis_template.format(lowerCAmelCase_ ) for x in candidate_labels]
A__ : Dict =self.tokenizer(lowerCAmelCase_ , return_tensors=self.framework , padding=lowerCAmelCase_ )
A__ : int =[text_inputs]
return inputs
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
A__ : Optional[int] =model_inputs.pop("""candidate_labels""" )
A__ : List[str] =model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , lowerCAmelCase_ ):
A__ : List[str] =text_inputs[0]
else:
# Batching case.
A__ : Optional[Any] =text_inputs[0][0]
A__ : List[str] =self.model(**lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : List[str] ={
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def lowercase__ ( self : Any , lowerCAmelCase_ : List[str] ) -> Tuple:
'''simple docstring'''
A__ : Any =model_outputs.pop("""candidate_labels""" )
A__ : str =model_outputs["""logits"""][0]
if self.framework == "pt":
A__ : str =logits.softmax(dim=0 )
A__ : List[str] =probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
A__ : List[str] =[
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase_ , lowerCAmelCase_ ) , key=lambda lowerCAmelCase_ : -x[0] )
]
return result
| 134
| 0
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=1_3 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=9_9 , UpperCamelCase__ : List[str]=1_6 , UpperCamelCase__ : Dict=3_6 , UpperCamelCase__ : Optional[int]=6 , UpperCamelCase__ : List[str]=6 , UpperCamelCase__ : Dict=6 , UpperCamelCase__ : List[Any]=3_7 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=5_1_2 , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=None , )-> int:
'''simple docstring'''
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: Tuple = batch_size
__lowerCAmelCase: Any = seq_length
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: int = use_input_mask
__lowerCAmelCase: Tuple = use_token_type_ids
__lowerCAmelCase: Any = use_labels
__lowerCAmelCase: Dict = vocab_size
__lowerCAmelCase: Optional[Any] = embedding_size
__lowerCAmelCase: Tuple = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Union[str, Any] = num_hidden_groups
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: List[str] = intermediate_size
__lowerCAmelCase: List[str] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: Any = attention_probs_dropout_prob
__lowerCAmelCase: Tuple = max_position_embeddings
__lowerCAmelCase: int = type_vocab_size
__lowerCAmelCase: Dict = type_sequence_label_size
__lowerCAmelCase: List[str] = initializer_range
__lowerCAmelCase: str = num_labels
__lowerCAmelCase: int = num_choices
__lowerCAmelCase: Dict = scope
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase: List[Any] = None
if self.use_input_mask:
__lowerCAmelCase: Dict = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase: Union[str, Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Tuple = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase: Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Optional[int])-> Union[str, Any]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowercase_ ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Dict = AlbertModel(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__)
__lowerCAmelCase: str = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__)
__lowerCAmelCase: int = model(UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowercase_ ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = AlbertForPreTraining(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[int] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , sentence_order_label=UpperCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def lowercase_ ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any])-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[str] = AlbertForMaskedLM(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Dict = AlbertForQuestionAnswering(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: List[str] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase_ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Optional[int] = AlbertForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str)-> int:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: str = AlbertForTokenClassification(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.num_choices
__lowerCAmelCase: int = AlbertForMultipleChoice(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Dict = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase: Union[str, Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase: Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase: Union[str, Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowercase_ ( self : str)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Optional[int] = config_and_inputs
__lowerCAmelCase: List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __snake_case, __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict = True
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=False)-> str:
'''simple docstring'''
__lowerCAmelCase: Any = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__)
if return_labels:
if model_class in get_values(UpperCamelCase__):
__lowerCAmelCase: Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__)
__lowerCAmelCase: Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__)
return inputs_dict
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = AlbertModelTester(self)
__lowerCAmelCase: int = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7)
def lowercase_ ( self : Dict)-> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Any)-> int:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : Any)-> int:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__)
def lowercase_ ( self : List[Any])-> Dict:
'''simple docstring'''
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__)
def lowercase_ ( self : Tuple)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__)
def lowercase_ ( self : str)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__)
def lowercase_ ( self : Dict)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: int = type
self.model_tester.create_and_check_model(*UpperCamelCase__)
@slow
def lowercase_ ( self : List[Any])-> List[Any]:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: int = AlbertModel.from_pretrained(UpperCamelCase__)
self.assertIsNotNone(UpperCamelCase__)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def lowercase_ ( self : Optional[Any])-> int:
'''simple docstring'''
__lowerCAmelCase: Dict = AlbertModel.from_pretrained("albert-base-v2")
__lowerCAmelCase: Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__lowerCAmelCase: Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
__lowerCAmelCase: int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)[0]
__lowerCAmelCase: List[str] = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , UpperCamelCase__)
__lowerCAmelCase: List[str] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4))
| 108
|
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def a__ ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 108
| 1
|
import string
import numpy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return b if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase )
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : Any = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase__ : Optional[int] = numpy.vectorize(lambda A_ : x % 3_6 )
UpperCAmelCase__ : int = numpy.vectorize(__UpperCAmelCase )
def __init__( self , A_ ) -> Optional[Any]:
__UpperCamelCase =self.modulus(lowerCAmelCase_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__UpperCamelCase =encrypt_key.shape[0]
def _a ( self , A_ ) -> List[Any]:
return self.key_string.index(lowerCAmelCase_ )
def _a ( self , A_ ) -> Dict:
return self.key_string[round(lowerCAmelCase_ )]
def _a ( self ) -> int:
__UpperCamelCase =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__UpperCamelCase =det % len(self.key_string )
__UpperCamelCase =len(self.key_string )
if greatest_common_divisor(lowerCAmelCase_ , len(self.key_string ) ) != 1:
__UpperCamelCase =(
f'determinant modular {req_l} of encryption key({det}) '
f'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(lowerCAmelCase_ )
def _a ( self , A_ ) -> Any:
__UpperCamelCase =[char for char in text.upper() if char in self.key_string]
__UpperCamelCase =chars[-1]
while len(lowerCAmelCase_ ) % self.break_key != 0:
chars.append(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =self.process_text(text.upper() )
__UpperCamelCase =''
for i in range(0 , len(lowerCAmelCase_ ) - self.break_key + 1 , self.break_key ):
__UpperCamelCase =text[i : i + self.break_key]
__UpperCamelCase =[self.replace_letters(lowerCAmelCase_ ) for char in batch]
__UpperCamelCase =numpy.array([vec] ).T
__UpperCamelCase =self.modulus(self.encrypt_key.dot(lowerCAmelCase_ ) ).T.tolist()[
0
]
__UpperCamelCase =''.join(
self.replace_digits(lowerCAmelCase_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _a ( self ) -> str:
__UpperCamelCase =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__UpperCamelCase =det % len(self.key_string )
__UpperCamelCase =None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__UpperCamelCase =i
break
__UpperCamelCase =(
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(lowerCAmelCase_ ) )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =self.make_decrypt_key()
__UpperCamelCase =self.process_text(text.upper() )
__UpperCamelCase =''
for i in range(0 , len(lowerCAmelCase_ ) - self.break_key + 1 , self.break_key ):
__UpperCamelCase =text[i : i + self.break_key]
__UpperCamelCase =[self.replace_letters(lowerCAmelCase_ ) for char in batch]
__UpperCamelCase =numpy.array([vec] ).T
__UpperCamelCase =self.modulus(decrypt_key.dot(lowerCAmelCase_ ) ).T.tolist()[0]
__UpperCamelCase =''.join(
self.replace_digits(lowerCAmelCase_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def _UpperCAmelCase ( ):
__UpperCamelCase =int(input('Enter the order of the encryption key: ' ) )
__UpperCamelCase =[]
print('Enter each row of the encryption key with space separated integers' )
for _ in range(__lowerCAmelCase ):
__UpperCamelCase =[int(__lowerCAmelCase ) for x in input().split()]
hill_matrix.append(__lowerCAmelCase )
__UpperCamelCase =HillCipher(numpy.array(__lowerCAmelCase ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
__UpperCamelCase =input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
__UpperCamelCase =input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(__lowerCAmelCase ) )
elif option == "2":
__UpperCamelCase =input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 62
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "Speech2TextFeatureExtractor"
lowercase__ = "Speech2TextTokenizer"
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = self.feature_extractor
lowercase_ = False
def __call__( self : Dict , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[str]):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_)
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
lowercase_ = kwargs.pop("""raw_speech""")
else:
lowercase_ = kwargs.pop("""audio""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""sampling_rate""" , lowerCAmelCase_)
lowercase_ = kwargs.pop("""text""" , lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
lowercase_ = args[0]
lowercase_ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if audio is not None:
lowercase_ = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_)
if text is not None:
lowercase_ = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_)
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase_ = encodings["""input_ids"""]
return inputs
def _UpperCAmelCase ( self : List[str] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : str):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_)
@contextmanager
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""")
lowercase_ = True
lowercase_ = self.tokenizer
yield
lowercase_ = self.feature_extractor
lowercase_ = False
| 136
| 0
|
def __snake_case ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
while a != 0:
_UpperCAmelCase , _UpperCAmelCase : str = b % a, a
return b
def __snake_case ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
if gcd(UpperCamelCase__ , UpperCamelCase__ ) != 1:
_UpperCAmelCase : int = f'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(UpperCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = 1, 0, a
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = 0, 1, m
while va != 0:
_UpperCAmelCase : Union[str, Any] = ua // va
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 367
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_lowerCAmelCase : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase : str = k.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return k
def __snake_case ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
_UpperCAmelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusConfig(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = torch_model.model.state_dict()
_UpperCAmelCase : Union[str, Any] = {}
for k, v in tf_weights.items():
_UpperCAmelCase : Union[str, Any] = rename_state_dict_key(SCREAMING_SNAKE_CASE__ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase : Any = v.T
_UpperCAmelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase : Tuple = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
_UpperCAmelCase : Any = mapping["shared.weight"]
_UpperCAmelCase : Dict = mapping["shared.weight"]
_UpperCAmelCase : Dict = {k: torch.zeros_like(SCREAMING_SNAKE_CASE__ ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = torch_model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Optional[Any] = ["Adafactor", "global_step"]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase : int = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = array
return tf_weights
def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(SCREAMING_SNAKE_CASE__ ).parent.name
_UpperCAmelCase : Tuple = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
_UpperCAmelCase : Dict = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=SCREAMING_SNAKE_CASE__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(SCREAMING_SNAKE_CASE__ )
# convert model
_UpperCAmelCase : Union[str, Any] = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase : Optional[int] = task_specific_params
_UpperCAmelCase : str = convert_pegasus(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(SCREAMING_SNAKE_CASE__ , Path(SCREAMING_SNAKE_CASE__ ) / "pytorch_model.bin" )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
if args.save_dir is None:
_lowerCAmelCase : Tuple = Path(args.tf_ckpt_path).parent.name
_lowerCAmelCase : Dict = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 202
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = ScoreSdeVeScheduler()
_UpperCAmelCase = ScoreSdeVePipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
sde_ve.to(UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=UpperCAmelCase ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=UpperCAmelCase , return_dict=UpperCAmelCase )[
0
]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'google/ncsnpp-church-256'
_UpperCAmelCase = UNetaDModel.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = ScoreSdeVeScheduler.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = ScoreSdeVePipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
sde_ve.to(UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=UpperCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 39
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
| 39
| 1
|
"""simple docstring"""
from __future__ import annotations
UpperCamelCase : int = []
def A ( snake_case :list[list[int]] , snake_case :int , snake_case :int ) -> bool:
for i in range(len(snake_case ) ):
if board[row][i] == 1:
return False
for i in range(len(snake_case ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(snake_case , -1 , -1 ) , range(snake_case , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(snake_case , -1 , -1 ) , range(snake_case , len(snake_case ) ) ):
if board[i][j] == 1:
return False
return True
def A ( snake_case :list[list[int]] , snake_case :int ) -> bool:
if row >= len(snake_case ):
solution.append(snake_case )
printboard(snake_case )
print()
return True
for i in range(len(snake_case ) ):
if is_safe(snake_case , snake_case , snake_case ):
__UpperCamelCase = 1
solve(snake_case , row + 1 )
__UpperCamelCase = 0
return False
def A ( snake_case :list[list[int]] ) -> None:
for i in range(len(snake_case ) ):
for j in range(len(snake_case ) ):
if board[i][j] == 1:
print('Q' , end=' ' )
else:
print('.' , end=' ' )
print()
# n=int(input("The no. of queens"))
UpperCamelCase : Optional[Any] = 8
UpperCamelCase : List[Any] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 371
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = field
__UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths}
__UpperCamelCase = Json(
cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
__UpperCamelCase = dataset
__UpperCamelCase = path_or_buf
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = 'utf-8'
__UpperCamelCase = to_json_kwargs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.to_json_kwargs.pop('path_or_buf' , __UpperCAmelCase )
__UpperCamelCase = self.to_json_kwargs.pop('orient' , 'records' )
__UpperCamelCase = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
__UpperCamelCase = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
__UpperCamelCase = self.to_json_kwargs.pop('compression' , __UpperCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=__UpperCAmelCase ) as buffer:
__UpperCamelCase = self._write(file_obj=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
' was passed. Please provide a local path instead.' )
__UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
return written
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **__UpperCAmelCase )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
__UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__UpperCAmelCase )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(__UpperCAmelCase )
return written
| 263
| 0
|
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase :Union[str, Any] = [0] * len(__magic_name__ )
UpperCamelCase :int = []
UpperCamelCase :str = []
UpperCamelCase :str = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__magic_name__ ) ):
if indegree[i] == 0:
queue.append(__magic_name__ )
while queue:
UpperCamelCase :str = queue.pop(0 )
cnt += 1
topo.append(__magic_name__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__magic_name__ )
if cnt != len(__magic_name__ ):
print("""Cycle exists""" )
else:
print(__magic_name__ )
# Adjacency List of Graph
UpperCAmelCase_ : str = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 38
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ : Union[str, Any] = 16
UpperCAmelCase_ : int = 32
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Accelerator , __magic_name__ : int = 16 , __magic_name__ : str = "bert-base-cased" ) -> Dict:
"""simple docstring"""
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__magic_name__ )
UpperCamelCase :Union[str, Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__magic_name__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase :List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase :List[Any] = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__magic_name__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase :Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__magic_name__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__magic_name__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(__magic_name__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCamelCase :List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
UpperCamelCase :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase :Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase :Union[str, Any] = config["""lr"""]
UpperCamelCase :List[str] = int(config["""num_epochs"""] )
UpperCamelCase :str = int(config["""seed"""] )
UpperCamelCase :Dict = int(config["""batch_size"""] )
UpperCamelCase :Union[str, Any] = args.model_name_or_path
set_seed(__magic_name__ )
UpperCamelCase , UpperCamelCase :Dict = get_dataloaders(__magic_name__ , __magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase :List[str] = AutoModelForSequenceClassification.from_pretrained(__magic_name__ , return_dict=__magic_name__ )
# Instantiate optimizer
UpperCamelCase :Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__magic_name__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase :Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCamelCase :Any = 1
UpperCamelCase :Dict = (len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase :List[Any] = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=0 , num_training_steps=__magic_name__ , )
else:
UpperCamelCase :Any = DummyScheduler(__magic_name__ , total_num_steps=__magic_name__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase :int = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase :Tuple = 0
# Now we train the model
UpperCamelCase :Any = evaluate.load("""glue""" , """mrpc""" )
UpperCamelCase :Tuple = 0
UpperCamelCase :List[Any] = {}
for epoch in range(__magic_name__ , __magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
UpperCamelCase :List[str] = model(**__magic_name__ )
UpperCamelCase :Dict = outputs.loss
UpperCamelCase :Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(__magic_name__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCamelCase :str = 0
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase :Optional[int] = model(**__magic_name__ )
UpperCamelCase :List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase , UpperCamelCase :Optional[int] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__magic_name__ ) - 1:
UpperCamelCase :Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase :List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
UpperCamelCase :List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __magic_name__ )
UpperCamelCase :Dict = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCamelCase :str = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(__magic_name__ , __magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
"""simple docstring"""
UpperCamelCase :List[str] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__magic_name__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__magic_name__ , )
parser.add_argument(
"""--output_dir""" , type=__magic_name__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=__magic_name__ , default=__magic_name__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=__magic_name__ , default=3 , help="""Number of train epochs.""" , )
UpperCamelCase :str = parser.parse_args()
UpperCamelCase :Any = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 38
| 1
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any]=13 , __a : Any=7 , __a : Optional[Any]=True , __a : Dict=True , __a : List[Any]=True , __a : Any=True , __a : int=99 , __a : Union[str, Any]=32 , __a : Union[str, Any]=5 , __a : int=4 , __a : str=4 , __a : Any="gelu" , __a : Union[str, Any]=0.0 , __a : Tuple=0.1 , __a : Optional[Any]=True , __a : Union[str, Any]=512 , __a : Tuple=16 , __a : List[Any]=2 , __a : List[Any]=0.02 , __a : Union[str, Any]=3 , __a : List[str]=4 , __a : Union[str, Any]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = parent
__lowercase : List[Any] = batch_size
__lowercase : List[Any] = seq_length
__lowercase : Optional[Any] = is_training
__lowercase : Optional[Any] = use_input_mask
__lowercase : Dict = use_token_type_ids
__lowercase : List[str] = use_labels
__lowercase : Dict = vocab_size
__lowercase : Union[str, Any] = hidden_size
__lowercase : Any = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : Optional[Any] = intermediate_multiple_size
__lowercase : List[str] = hidden_act
__lowercase : Dict = hidden_dropout
__lowercase : Union[str, Any] = attention_dropout
__lowercase : Any = weight_tying
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : Optional[Any] = type_vocab_size
__lowercase : Tuple = type_sequence_label_size
__lowercase : Optional[Any] = initializer_range
__lowercase : str = num_labels
__lowercase : Optional[Any] = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Any = None
if self.use_labels:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
__lowercase : List[str] = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = GPTNeoXJapaneseModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
__lowercase : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Any = True
__lowercase : Optional[Any] = GPTNeoXJapaneseModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase : List[str] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Dict , __a : Union[str, Any] , __a : Dict , __a : Optional[Any] , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = True
__lowercase : List[str] = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
__lowercase : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__lowercase : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowercase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
__lowercase : Dict = output_from_no_past['''hidden_states'''][0]
__lowercase : Dict = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['''hidden_states'''][0]
# select random slice
__lowercase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[Any] = config_and_inputs
__lowercase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_A : Dict = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_A : Tuple = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_A : Optional[int] = False
_A : str = False
_A : Dict = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = GPTNeoXJapaneseModelTester(self )
__lowercase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase : Tuple = None
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = '''abeja/gpt-neox-japanese-2.7b'''
__lowercase : str = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__lowercase : List[str] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__lowercase : Any = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase__ )
__lowercase : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase__ )
__lowercase : Union[str, Any] = []
for prompt in prompts:
__lowercase : int = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ).input_ids
__lowercase : List[str] = model.generate(UpperCamelCase__ , max_length=50 )
__lowercase : int = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
| 367
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : int = batch_size
__lowercase : Any = seq_length
__lowercase : str = is_training
__lowercase : str = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : int = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : Union[str, Any] = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : Union[str, Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
__lowercase : Optional[Any] = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
__lowercase : str = model(__a , attention_mask=__a )
__lowercase : List[Any] = model(__a )
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Any = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) : List[str] = config_and_inputs
__lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = False
_A : Any = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = ()
_A : List[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = EsmModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Union[str, Any] = type
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[str] = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : List[str] = EsmEmbeddings(config=__a )
__lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__lowercase : int = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : Optional[Any] = EsmEmbeddings(config=__a )
__lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
__lowercase : Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
__lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( __a ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase : List[str] = model(__a )[0]
__lowercase : Union[str, Any] = 33
__lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
__lowercase : List[Any] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__lowercase : Any = model(__a )[0]
# compare the actual values for a slice.
__lowercase : int = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 306
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 171
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2", "stage3"] , _lowerCamelCase=[1, 2, 3] , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Dict = embed_dim
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : Dict = num_heads
UpperCAmelCase__ : Any = window_size
UpperCAmelCase__ : str = mlp_ratio
UpperCAmelCase__ : str = qkv_bias
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = drop_path_rate
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Union[str, Any] = use_absolute_embeddings
UpperCAmelCase__ : Optional[int] = patch_norm
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Tuple = encoder_stride
UpperCAmelCase__ : Optional[int] = out_features
UpperCAmelCase__ : str = out_indices
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _a (self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Dict = model(_lowerCamelCase )
UpperCAmelCase__ : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Union[str, Any] = ["""stem"""]
UpperCAmelCase__ : List[Any] = MaskFormerSwinBackbone(config=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = MaskFormerSwinModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a (self ):
"""simple docstring"""
return
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCAmelCase__ : str = outputs.hidden_states
UpperCAmelCase__ : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# Swin has a different seq_length
UpperCAmelCase__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Tuple = 3
UpperCAmelCase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = 0
return t
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : str = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. Dict has"""
F""" `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}."""
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
UpperCAmelCase__ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = MaskFormerSwinModelTester(self )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = backbone_class(_lowerCamelCase )
backbone.to(_lowerCamelCase )
backbone.eval()
UpperCAmelCase__ : int = backbone(**_lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
| 171
| 1
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 352
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if isinstance(_lowercase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __UpperCamelCase :
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
pass
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> str:
pass
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
a : Dict = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
a, a : Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = {"vision_model": vision_model, "text_model": text_model}
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
a, a : Dict = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = {"vision_model": vision_model, "text_model": text_model}
a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
a : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : List[Any] = after_output[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]:
a, a : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = {"vision_model": vision_model, "text_model": text_model}
a : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : Tuple = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
a : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[int] = to_atuple(vision_model.config.image_size )
a : Tuple = to_atuple(vision_model.config.patch_size )
a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a : str = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
a : List[Any] = inputs_dict
a : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a : int = pt_model(**lowerCAmelCase__ ).to_tuple()
a : Union[str, Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
a : Optional[int] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
a : int = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
a : List[str] = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def __a ( self ) -> Any:
a : List[Any] = self.prepare_config_and_inputs()
a : Tuple = config_inputs_dict.pop("vision_config" )
a : int = config_inputs_dict.pop("text_config" )
a : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __a ( self ) -> List[Any]:
a, a : Optional[int] = self.get_pretrained_model_and_inputs()
a : Optional[int] = model_a(**lowerCAmelCase__ )
a : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
a : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : str = model_a(**lowerCAmelCase__ )
a : Dict = after_outputs[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Any = 13
a : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Optional[Any] = random_attention_mask([batch_size, 4] )
a : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Dict = FlaxViTModel(lowerCAmelCase__ )
a : Dict = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> str:
a : Union[str, Any] = FlaxViTModelTester(self )
a : Dict = FlaxBertModelTester(self )
a : str = vit_model_tester.prepare_config_and_inputs()
a : Any = bert_model_tester.prepare_config_and_inputs()
a, a : Optional[int] = vision_config_and_inputs
a, a, a, a : Dict = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Tuple = 13
a : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Tuple = random_attention_mask([batch_size, 4] )
a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[Any] = FlaxCLIPVisionModel(lowerCAmelCase__ )
a : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> List[Any]:
a : Tuple = FlaxCLIPVisionModelTester(self )
a : Union[str, Any] = FlaxBertModelTester(self )
a : Dict = clip_model_tester.prepare_config_and_inputs()
a : Optional[int] = bert_model_tester.prepare_config_and_inputs()
a, a : Dict = vision_config_and_inputs
a, a, a, a : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Dict:
a : str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
a : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" )
a : Optional[Any] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : List[str] = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
| 79
| 0
|
"""simple docstring"""
class snake_case :
'''simple docstring'''
def __init__( self : Dict, _lowerCamelCase : list ):
'''simple docstring'''
__A = set_counts
__A = max(_lowerCamelCase )
__A = len(_lowerCamelCase )
__A = [1] * num_sets
__A = list(range(_lowerCamelCase ) )
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : int, _lowerCamelCase : int ):
'''simple docstring'''
__A = self.get_parent(_lowerCamelCase )
__A = self.get_parent(_lowerCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__A = 0
__A = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__A = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__A = 0
__A = src_parent
__A = self.set_counts[src_parent]
__A = max(self.max_set, _lowerCamelCase )
return True
def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
__A = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 266
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : int = ["input_features", "attention_mask"]
def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ):
'''simple docstring'''
super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase )
__A = num_mel_bins
__A = do_ceptral_normalize
__A = normalize_means
__A = normalize_vars
__A = True
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ):
'''simple docstring'''
__A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
__A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ):
'''simple docstring'''
# make sure we normalize float32 arrays
if normalize_means:
__A = x[:input_length].mean(axis=0 )
__A = np.subtract(_lowerCamelCase, _lowerCamelCase )
if normalize_vars:
__A = x[:input_length].std(axis=0 )
__A = np.divide(_lowerCamelCase, _lowerCamelCase )
if input_length < x.shape[0]:
__A = padding_value
# make sure array is in float32
__A = x.astype(np.floataa )
return x
def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ):
'''simple docstring'''
__A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value )
for x, n in zip(_lowerCamelCase, _lowerCamelCase )
]
def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__A = is_batched_numpy or (
isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
__A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ):
__A = np.asarray(_lowerCamelCase, dtype=np.floataa )
elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__A = [raw_speech]
# extract fbank features
__A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech]
# convert into correct format for padding
__A = BatchFeature({'''input_features''': features} )
__A = self.pad(
_lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, )
# make sure list is in array format
__A = padded_inputs.get('''input_features''' )
if isinstance(input_features[0], _lowerCamelCase ):
__A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features]
__A = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__A = (
np.array(_lowerCamelCase, dtype=np.intaa )
if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__A = self.normalize(
padded_inputs['''input_features'''], attention_mask=_lowerCamelCase )
if return_tensors is not None:
__A = padded_inputs.convert_to_tensors(_lowerCamelCase )
return padded_inputs
| 266
| 1
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase__ ( a__ ) ->Any:
'''simple docstring'''
_UpperCamelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , a__ ).groups()[0]
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : List[Any]=None) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = file_names
_UpperCamelCase = image_transform
_UpperCamelCase = label_to_id
def __len__( self : str) -> Tuple:
"""simple docstring"""
return len(self.file_names)
def __getitem__( self : Optional[int] , lowercase_ : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.file_names[idx]
_UpperCamelCase = PIL.Image.open(lowercase_)
_UpperCamelCase = raw_image.convert("RGB")
if self.image_transform is not None:
_UpperCamelCase = self.image_transform(lowercase_)
_UpperCamelCase = extract_label(lowercase_)
if self.label_to_id is not None:
_UpperCamelCase = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase__ ( a__ , a__ ) ->List[Any]:
'''simple docstring'''
if args.with_tracking:
_UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
_UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase = config["lr"]
_UpperCamelCase = int(config["num_epochs"] )
_UpperCamelCase = int(config["seed"] )
_UpperCamelCase = int(config["batch_size"] )
_UpperCamelCase = config["image_size"]
if not isinstance(a__ , (list, tuple) ):
_UpperCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
_UpperCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_UpperCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
_UpperCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_UpperCamelCase = os.path.split(a__ )[-1].split("." )[0]
accelerator.init_trackers(a__ , a__ )
# Grab all the image filenames
_UpperCamelCase = [os.path.join(args.data_dir , a__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
_UpperCamelCase = [extract_label(a__ ) for fname in file_names]
_UpperCamelCase = list(set(a__ ) )
id_to_label.sort()
_UpperCamelCase = {lbl: i for i, lbl in enumerate(a__ )}
# Set the seed before splitting the data.
np.random.seed(a__ )
torch.manual_seed(a__ )
torch.cuda.manual_seed_all(a__ )
# Split our filenames between train and validation
_UpperCamelCase = np.random.permutation(len(a__ ) )
_UpperCamelCase = int(0.8 * len(a__ ) )
_UpperCamelCase = random_perm[:cut]
_UpperCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_UpperCamelCase = Compose([RandomResizedCrop(a__ , scale=(0.5, 1.0) ), ToTensor()] )
_UpperCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=a__ , label_to_id=a__ )
# For evaluation, we use a deterministic Resize
_UpperCamelCase = Compose([Resize(a__ ), ToTensor()] )
_UpperCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=a__ , label_to_id=a__ )
# Instantiate dataloaders.
_UpperCamelCase = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
_UpperCamelCase = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase = create_model("resnet50d" , pretrained=a__ , num_classes=len(a__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_UpperCamelCase = False
for param in model.get_classifier().parameters():
_UpperCamelCase = True
# We normalize the batches of images to be a bit faster.
_UpperCamelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
_UpperCamelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_UpperCamelCase = OneCycleLR(optimizer=a__ , max_lr=a__ , epochs=a__ , steps_per_epoch=len(a__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
_UpperCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_UpperCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_UpperCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_UpperCamelCase = os.path.splitext(a__ )[0]
if "epoch" in training_difference:
_UpperCamelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
_UpperCamelCase = None
else:
_UpperCamelCase = int(training_difference.replace("step_" , "" ) )
_UpperCamelCase = resume_step // len(a__ )
resume_step -= starting_epoch * len(a__ )
# Now we train the model
for epoch in range(a__ , a__ ):
model.train()
if args.with_tracking:
_UpperCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_UpperCamelCase = accelerator.skip_first_batches(a__ , a__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_UpperCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCamelCase = (batch["image"] - mean) / std
_UpperCamelCase = model(a__ )
_UpperCamelCase = torch.nn.functional.cross_entropy(a__ , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(a__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(a__ , a__ ):
_UpperCamelCase = f'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_UpperCamelCase = os.path.join(args.output_dir , a__ )
accelerator.save_state(a__ )
model.eval()
_UpperCamelCase = 0
_UpperCamelCase = 0
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCamelCase = (batch["image"] - mean) / std
with torch.no_grad():
_UpperCamelCase = model(a__ )
_UpperCamelCase = outputs.argmax(dim=-1 )
_UpperCamelCase , _UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
_UpperCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_UpperCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(a__ ),
"epoch": epoch,
} , step=a__ , )
if checkpointing_steps == "epoch":
_UpperCamelCase = f'epoch_{epoch}'
if args.output_dir is not None:
_UpperCamelCase = os.path.join(args.output_dir , a__ )
accelerator.save_state(a__ )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase__ ( ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=a__ , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=a__ , default=a__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=a__ , default=a__ , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=a__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=a__ , default=a__ , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=a__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(a__ , a__ )
if __name__ == "__main__":
main()
| 63
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''deta'''
__A = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Tuple , lowercase_ : int=None , lowercase_ : Union[str, Any]=900 , lowercase_ : Any=2048 , lowercase_ : Optional[int]=6 , lowercase_ : Optional[int]=2048 , lowercase_ : List[Any]=8 , lowercase_ : Union[str, Any]=6 , lowercase_ : Optional[Any]=1024 , lowercase_ : Dict=8 , lowercase_ : Any=0.0 , lowercase_ : str=True , lowercase_ : List[Any]="relu" , lowercase_ : Optional[int]=256 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=0.02 , lowercase_ : List[str]=1.0 , lowercase_ : List[str]=True , lowercase_ : Any=False , lowercase_ : int="sine" , lowercase_ : str=5 , lowercase_ : int=4 , lowercase_ : Any=4 , lowercase_ : Tuple=True , lowercase_ : List[Any]=300 , lowercase_ : Tuple=True , lowercase_ : Any=True , lowercase_ : str=1 , lowercase_ : List[str]=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : Tuple=1 , lowercase_ : int=1 , lowercase_ : Tuple=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=0.25 , **lowercase_ : Any , ) -> List[str]:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
else:
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = backbone_config.pop("model_type")
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(lowercase_)
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return self.d_model
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = copy.deepcopy(self.__dict__)
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
| 63
| 1
|
"""simple docstring"""
from typing import Any
class __A :
"""simple docstring"""
def __init__( self , __A ) -> Union[str, Any]:
a =data
a =None
class __A :
"""simple docstring"""
def __init__( self ) -> List[str]:
a =None
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.head
while temp is not None:
print(temp.data , end=''' ''' )
a =temp.next
print()
def SCREAMING_SNAKE_CASE ( self , __A ) -> Union[str, Any]:
a =Node(__A )
a =self.head
a =new_node
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> str:
if node_data_a == node_data_a:
return
else:
a =self.head
while node_a is not None and node_a.data != node_data_a:
a =node_a.next
a =self.head
while node_a is not None and node_a.data != node_data_a:
a =node_a.next
if node_a is None or node_a is None:
return
a , a =node_a.data, node_a.data
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 81
|
from maths.prime_check import is_prime
def snake_case_ ( lowerCAmelCase_ : int ):
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : Dict = F"Input value of [number={number}] must be an integer"
raise TypeError(lowerCAmelCase_ )
if is_prime(lowerCAmelCase_ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any] ):
a__: List[Any] =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a__: Tuple =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_a )
a__: Optional[Any] =-1
a__: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
a__: Tuple =model.generate(_a , max_new_tokens=1_0 , do_sample=_a )
a__: List[str] =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
a__: List[Any] =TextStreamer(_a )
model.generate(_a , max_new_tokens=1_0 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a__: Union[str, Any] =cs.out[:-1]
self.assertEqual(_a , _a )
def _lowerCamelCase ( self : str ):
a__: int =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a__: Tuple =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_a )
a__: Dict =-1
a__: List[Any] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
a__: List[str] =model.generate(_a , max_new_tokens=1_0 , do_sample=_a )
a__: Union[str, Any] =tokenizer.decode(greedy_ids[0] )
a__: Union[str, Any] =TextIteratorStreamer(_a )
a__: Optional[int] ={"input_ids": input_ids, "max_new_tokens": 1_0, "do_sample": False, "streamer": streamer}
a__: Union[str, Any] =Thread(target=model.generate , kwargs=_a )
thread.start()
a__: Any =""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_a , _a )
def _lowerCamelCase ( self : Optional[Any] ):
a__: str =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a__: Dict =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_a )
a__: List[str] =-1
a__: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
a__: List[Any] =model.generate(_a , max_new_tokens=1_0 , do_sample=_a )
a__: Tuple =greedy_ids[:, input_ids.shape[1] :]
a__: List[Any] =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
a__: Dict =TextStreamer(_a , skip_prompt=_a )
model.generate(_a , max_new_tokens=1_0 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a__: Union[str, Any] =cs.out[:-1]
self.assertEqual(_a , _a )
def _lowerCamelCase ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
a__: Optional[Any] =AutoTokenizer.from_pretrained("distilgpt2" )
a__: str =AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_a )
a__: Any =-1
a__: Optional[int] =torch.ones((1, 5) , device=_a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
a__: Optional[int] =TextStreamer(_a , skip_special_tokens=_a )
model.generate(_a , max_new_tokens=1 , do_sample=_a , streamer=_a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
a__: Union[str, Any] =cs.out[:-1] # Remove the final "\n"
a__: int =tokenizer(_a , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _lowerCamelCase ( self : Optional[Any] ):
a__: Optional[Any] =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a__: Optional[Any] =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_a )
a__: Tuple =-1
a__: Optional[int] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
a__: Any =TextIteratorStreamer(_a , timeout=0.0_0_1 )
a__: Dict ={"input_ids": input_ids, "max_new_tokens": 1_0, "do_sample": False, "streamer": streamer}
a__: int =Thread(target=model.generate , kwargs=_a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_a ):
a__: Tuple =""
for new_text in streamer:
streamer_text += new_text
| 360
|
def __lowerCamelCase ( __magic_name__ : int ):
if not isinstance(__magic_name__ , __magic_name__ ):
a__: List[str] =F"Input value of [number={number}] must be an integer"
raise TypeError(__magic_name__ )
if number < 1:
a__: Union[str, Any] =F"Input value of [number={number}] must be > 0"
raise ValueError(__magic_name__ )
a__: List[Any] =1
for i in range(1 , __magic_name__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
| 0
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : str = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''data2vec-audio'''
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=16 , _UpperCAmelCase=19 , _UpperCAmelCase=5 , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase="sum" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase)
__A : Tuple = hidden_size
__A : Optional[Any] = feat_extract_activation
__A : int = list(_UpperCAmelCase)
__A : str = list(_UpperCAmelCase)
__A : List[str] = list(_UpperCAmelCase)
__A : Optional[Any] = conv_bias
__A : int = num_conv_pos_embeddings
__A : Dict = num_conv_pos_embedding_groups
__A : str = conv_pos_kernel_size
__A : Optional[Any] = len(self.conv_dim)
__A : Any = num_hidden_layers
__A : Optional[int] = intermediate_size
__A : int = hidden_act
__A : Any = num_attention_heads
__A : Any = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Optional[int] = activation_dropout
__A : int = feat_proj_dropout
__A : int = final_dropout
__A : Union[str, Any] = layerdrop
__A : Tuple = layer_norm_eps
__A : Union[str, Any] = initializer_range
__A : Dict = vocab_size
__A : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : List[str] = mask_time_prob
__A : str = mask_time_length
__A : Union[str, Any] = mask_time_min_masks
__A : List[str] = mask_feature_prob
__A : Optional[int] = mask_feature_length
__A : Tuple = mask_feature_min_masks
# ctc loss
__A : str = ctc_loss_reduction
__A : Union[str, Any] = ctc_zero_infinity
# adapter
__A : int = add_adapter
__A : Any = adapter_kernel_size
__A : Union[str, Any] = adapter_stride
__A : str = num_adapter_layers
__A : Tuple = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__A : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__A : Any = list(_UpperCAmelCase)
__A : Optional[int] = list(_UpperCAmelCase)
__A : Dict = list(_UpperCAmelCase)
__A : List[str] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return math.prod(self.conv_stride)
| 190
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
def _lowerCAmelCase ( __snake_case : Image ) -> str:
__A : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _lowerCAmelCase ( __snake_case : Image ) -> Dict:
__A : Dict = np.array(__snake_case )
__A : List[Any] = npimg.shape
return {"hash": hashimage(__snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge')
__A : Tuple = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256)
# Shortening by hashing
__A : int = []
for i, o in enumerate(outputs['masks']):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = 'facebook/sam-vit-huge'
__A : Optional[Any] = pipeline('mask-generation' , model=_UpperCAmelCase)
__A : int = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256)
# Shortening by hashing
__A : int = []
for i, o in enumerate(outputs['masks']):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
] , )
| 190
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE :List[Any] = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :int = ['''MobileNetV2FeatureExtractor''']
__SCREAMING_SNAKE_CASE :str = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Optional[int] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
__SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156
|
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : int = 100_0000 ) -> int:
'''simple docstring'''
_UpperCAmelCase = limit + 1
_UpperCAmelCase = [0] * limit
for first_term in range(1 , __lowercase ):
for n in range(__lowercase , __lowercase , __lowercase ):
_UpperCAmelCase = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
_UpperCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"{solution() = }")
| 156
| 1
|
from math import isqrt, loga
def _lowercase ( UpperCamelCase_ ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = False
return [i for i in range(2 , UpperCamelCase_ ) if is_prime[i]]
def _lowercase ( UpperCamelCase_ = 800800 , UpperCamelCase_ = 800800 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = degree * loga(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = calculate_prime_numbers(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 176
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : str =DebertaVaTokenizer
A__ : List[str] =DebertaVaTokenizerFast
A__ : Any =True
A__ : str =True
def A_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = 'this is a test'
SCREAMING_SNAKE_CASE__ = 'this is a test'
return input_text, output_text
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = '<pad>'
SCREAMING_SNAKE_CASE__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(UpperCAmelCase_ ) , 30001 )
def A_ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def A_ ( self : Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = ' \tHeLLo!how \n Are yoU? '
SCREAMING_SNAKE_CASE__ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def A_ ( self : Any ):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def A_ ( self : Tuple ):
pass
def A_ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Tuple ):
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE__ = ' \tHeLLo!how \n Are yoU? '
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = 'This is a test'
SCREAMING_SNAKE_CASE__ = [13, 1, 4398, 25, 21, 1289]
SCREAMING_SNAKE_CASE__ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# fmt: off
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
SCREAMING_SNAKE_CASE__ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
SCREAMING_SNAKE_CASE__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('sequence builders' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('multi-sequence build' )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCAmelCase_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCAmelCase_ , )
@slow
def A_ ( self : Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 176
| 1
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
UpperCAmelCase_ = timeit.default_timer()
UpperCAmelCase_ = func(*__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase_ = timeit.default_timer() - starttime
return delta
UpperCAmelCase_ = func.__name__
return wrapper
def A ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = seq_shapes or {}
for i in range(__UpperCAmelCase ):
UpperCAmelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
UpperCAmelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
UpperCAmelCase_ = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
UpperCAmelCase_ = v.feature
UpperCAmelCase_ = seq_shapes[k]
UpperCAmelCase_ = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
UpperCAmelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase_ = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase_ = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
| 344
|
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
| 344
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : List[str] =logging.get_logger(__name__)
A__ : Any ='''▁'''
A__ : List[Any] ={'''vocab_file''': '''spiece.model'''}
A__ : List[Any] ={
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
A__ : str ={
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: Optional[Any] = VOCAB_FILES_NAMES
_lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any="</s>" , __snake_case : Dict="<unk>" , __snake_case : List[str]=[] , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Union[str, Any] , ) -> None:
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__snake_case , unk_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_lowerCAmelCase = vocab_file
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
@property
def lowercase__ ( self : Union[str, Any] ) -> str:
return self.sp_model.get_piece_size()
def lowercase__ ( self : Optional[int] ) -> Dict[str, int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> List[str]:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self : str , __snake_case : Dict ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self : int , __snake_case : str ) -> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowercase__ ( self : str , __snake_case : Any ) -> Optional[int]:
return self.sp_model.piece_to_id(__snake_case )
def lowercase__ ( self : Optional[Any] , __snake_case : str ) -> Optional[Any]:
if index < self.sp_model.get_piece_size():
_lowerCAmelCase = self.sp_model.IdToPiece(__snake_case )
return token
def lowercase__ ( self : List[Any] , __snake_case : str ) -> int:
_lowerCAmelCase = []
_lowerCAmelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
_lowerCAmelCase = []
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def lowercase__ ( self : int , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 70
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=True, UpperCAmelCase__="pt" ) -> str:
A_ = {"""add_prefix_space""": True} if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and not line.startswith(""" """ ) else {}
A_ = padding_side
return tokenizer(
[line], max_length=UpperCAmelCase__, padding="""max_length""" if pad_to_max_length else None, truncation=UpperCAmelCase__, return_tensors=UpperCAmelCase__, add_special_tokens=UpperCAmelCase__, **UpperCAmelCase__, )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, ) -> List[str]:
A_ = input_ids.ne(UpperCAmelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="train" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="" , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = Path(UpperCamelCase__ ).joinpath(type_path + """.source""" )
A_ = Path(UpperCamelCase__ ).joinpath(type_path + """.target""" )
A_ = self.get_char_lens(self.src_file )
A_ = max_source_length
A_ = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
A_ = tokenizer
A_ = prefix
if n_obs is not None:
A_ = self.src_lens[:n_obs]
A_ = src_lang
A_ = tgt_lang
def __len__( self ) -> Dict:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , UpperCamelCase__ ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A_ = index + 1 # linecache starts at 1
A_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase__ ).rstrip("""\n""" )
A_ = linecache.getline(str(self.tgt_file ) , UpperCamelCase__ ).rstrip("""\n""" )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCamelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
)
A_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
A_ = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_source_length , """right""" )
A_ = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_target_length , """right""" )
A_ = source_inputs["""input_ids"""].squeeze()
A_ = target_inputs["""input_ids"""].squeeze()
A_ = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
return [len(UpperCamelCase__ ) for x in Path(UpperCamelCase__ ).open().readlines()]
def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A_ = torch.stack([x["""input_ids"""] for x in batch] )
A_ = torch.stack([x["""attention_mask"""] for x in batch] )
A_ = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase__ )
else self.tokenizer.pad_token_id
)
A_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase__ )
else self.tokenizer.pad_token_id
)
A_ = trim_batch(UpperCamelCase__ , UpperCamelCase__ )
A_ , A_ = trim_batch(UpperCamelCase__ , UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A_ = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__lowerCamelCase = getLogger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
return list(itertools.chain.from_iterable(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
A_ = get_git_info()
save_json(UpperCAmelCase__, os.path.join(UpperCAmelCase__, """git_log.json""" ) )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=4, **UpperCAmelCase__ ) -> Dict:
with open(UpperCAmelCase__, """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__, indent=UpperCAmelCase__, **UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
with open(UpperCAmelCase__ ) as f:
return json.load(UpperCAmelCase__ )
def UpperCAmelCase__ ( ) -> Any:
A_ = git.Repo(search_parent_directories=UpperCAmelCase__ )
A_ = {
"""repo_id""": str(UpperCAmelCase__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List:
return list(map(UpperCAmelCase__, UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
with open(UpperCAmelCase__, """wb""" ) as f:
return pickle.dump(UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
def remove_articles(UpperCAmelCase__ ):
return re.sub(r"""\b(a|an|the)\b""", """ """, UpperCAmelCase__ )
def white_space_fix(UpperCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(UpperCAmelCase__ ):
A_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = normalize_answer(UpperCAmelCase__ ).split()
A_ = normalize_answer(UpperCAmelCase__ ).split()
A_ = Counter(UpperCAmelCase__ ) & Counter(UpperCAmelCase__ )
A_ = sum(common.values() )
if num_same == 0:
return 0
A_ = 1.0 * num_same / len(UpperCAmelCase__ )
A_ = 1.0 * num_same / len(UpperCAmelCase__ )
A_ = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
return normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = 0
for hypo, pred in zip(UpperCAmelCase__, UpperCAmelCase__ ):
em += exact_match_score(UpperCAmelCase__, UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
em /= len(UpperCAmelCase__ )
return {"em": em}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
return model_prefix.startswith("""rag""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
A_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ = """dropout_rate"""
for p in extra_params:
if getattr(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ):
if not hasattr(UpperCAmelCase__, UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__, equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(UpperCAmelCase__ ) )
delattr(UpperCAmelCase__, UpperCAmelCase__ )
continue
A_ = p if hasattr(UpperCAmelCase__, UpperCAmelCase__ ) else equivalent_param[p]
setattr(UpperCAmelCase__, UpperCAmelCase__, getattr(UpperCAmelCase__, UpperCAmelCase__ ) )
delattr(UpperCAmelCase__, UpperCAmelCase__ )
return hparams, config
| 162
| 0
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
debug_launcher(test_ops.main )
| 353
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase : Union[str, Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCAmelCase : List[str] = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase : Dict = '▁'
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , )-> None:
'''simple docstring'''
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(A_ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A_ )
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = ''
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(A_ )
UpperCamelCase = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self )-> int:
'''simple docstring'''
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , A_ , A_ = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 251
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
_UpperCAmelCase = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
_UpperCAmelCase = model.state_dict()
def to_tf_var_name(__lowerCAmelCase ):
for patt, repl in iter(__lowerCAmelCase ):
_UpperCAmelCase = name.replace(__lowerCAmelCase , __lowerCAmelCase )
return F"""bert/{name}"""
def create_tf_var(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = tf.dtypes.as_dtype(tensor.dtype )
_UpperCAmelCase = tf.get_variable(dtype=__lowerCAmelCase , shape=tensor.shape , name=__lowerCAmelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__lowerCAmelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_UpperCAmelCase = to_tf_var_name(__lowerCAmelCase )
_UpperCAmelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_UpperCAmelCase = torch_tensor.T
_UpperCAmelCase = create_tf_var(tensor=__lowerCAmelCase , name=__lowerCAmelCase , session=__lowerCAmelCase )
tf.keras.backend.set_value(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = session.run(__lowerCAmelCase )
print(F"""Successfully created {tf_name}: {np.allclose(__lowerCAmelCase , __lowerCAmelCase )}""" )
_UpperCAmelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , model_name.replace('-' , '_' ) + '.ckpt' ) )
def __A ( __lowerCAmelCase=None )-> Any:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Directory in which to save tensorflow model' )
_UpperCAmelCase = parser.parse_args(__lowerCAmelCase )
_UpperCAmelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__lowerCAmelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 39
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __A ( )-> Tuple:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
_UpperCAmelCase = nn.BatchNormad(4 )
_UpperCAmelCase = nn.Linear(4 , 5 )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase ) ) )
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_UpperCAmelCase , _UpperCAmelCase = mock_training_loop_function('hello' )
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase ):
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch.cuda.memory_allocated()
_UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCAmelCase )
_UpperCAmelCase = release_memory(UpperCAmelCase )
self.assertEqual(torch.cuda.memory_allocated() , UpperCAmelCase )
| 39
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
lowerCAmelCase__ = 0
lowerCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCAmelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
lowerCAmelCase__ = tuple[int, int]
class lowercase_ :
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : int ,lowercase__ : int ,lowercase__ : int ,lowercase__ : int ,lowercase__ : int ,lowercase__ : Node | None ,):
__lowercase = pos_x
__lowercase = pos_y
__lowercase = (pos_y, pos_x)
__lowercase = goal_x
__lowercase = goal_y
__lowercase = g_cost
__lowercase = parent
__lowercase = self.calculate_heuristic()
__lowercase = self.g_cost + self.h_cost
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.pos_x - self.goal_x
__lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase__ ) + abs(lowercase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Dict ,lowercase__ : Node ):
return self.f_cost < other.f_cost
class lowercase_ :
"""simple docstring"""
def __init__( self : str ,lowercase__ : TPosition ,lowercase__ : TPosition ):
__lowercase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,lowercase__ )
__lowercase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9_9_9_9 ,lowercase__ )
__lowercase = [self.start]
__lowercase = []
__lowercase = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase__ )
self.closed_nodes.append(lowercase__ )
__lowercase = self.get_successors(lowercase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase__ )
else:
# retrieve the best current path
__lowercase = self.open_nodes.pop(self.open_nodes.index(lowercase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase__ )
else:
self.open_nodes.append(lowercase__ )
return [self.start.pos]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Node ):
__lowercase = []
for action in delta:
__lowercase = parent.pos_x + action[1]
__lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase__ ,lowercase__ ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,lowercase__ ,) )
return successors
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Node | None ):
__lowercase = node
__lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowercase = current_node.parent
path.reverse()
return path
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : TPosition ,lowercase__ : TPosition ):
__lowercase = AStar(lowercase__ ,lowercase__ )
__lowercase = AStar(lowercase__ ,lowercase__ )
__lowercase = False
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowercase = self.fwd_astar.open_nodes.pop(0 )
__lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase__ ,lowercase__ )
self.fwd_astar.closed_nodes.append(lowercase__ )
self.bwd_astar.closed_nodes.append(lowercase__ )
__lowercase = current_bwd_node
__lowercase = current_fwd_node
__lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase__ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase__ )
else:
# retrieve the best current path
__lowercase = astar.open_nodes.pop(
astar.open_nodes.index(lowercase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase__ )
else:
astar.open_nodes.append(lowercase__ )
return [self.fwd_astar.start.pos]
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Node ,lowercase__ : Node ):
__lowercase = self.fwd_astar.retrace_path(lowercase__ )
__lowercase = self.bwd_astar.retrace_path(lowercase__ )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = AStar(init, goal)
lowerCAmelCase__ = a_star.search()
lowerCAmelCase__ = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = BidirectionalAStar(init, goal)
lowerCAmelCase__ = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 353
|
'''simple docstring'''
def _A ( A__ = 1000 ):
"""simple docstring"""
__lowercase , __lowercase = 1, 1
__lowercase = 2
while True:
__lowercase = 0
__lowercase = fa + fa
__lowercase , __lowercase = fa, f
index += 1
for _ in str(A__ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 52
| 0
|
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( __lowercase : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
A_ : Dict = BeautifulSoup(requests.get(__lowercase ).text ,'html.parser' )
A_ : List[Any] = soup.findAll('h1' )
A_ : List[str] = soup.findAll('div' ,{'class': 'maincounter-number'} )
keys += soup.findAll('span' ,{'class': 'panel-title'} )
values += soup.findAll('div' ,{'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowercase ,__lowercase )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 140
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''char'''
lowerCamelCase_ = '''bpe'''
lowerCamelCase_ = '''wp'''
_UpperCAmelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = ['''image_processor''', '''char_tokenizer''']
lowerCamelCase_ = '''ViTImageProcessor'''
lowerCamelCase_ = '''MgpstrTokenizer'''
def __init__( self , lowercase=None , lowercase=None , **lowercase ):
"""simple docstring"""
A_ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase , )
A_ : Optional[int] = kwargs.pop('feature_extractor' )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
A_ : Union[str, Any] = tokenizer
A_ : List[Any] = AutoTokenizer.from_pretrained('gpt2' )
A_ : Any = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(lowercase , lowercase )
def __call__( self , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
A_ : List[Any] = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if text is not None:
A_ : Union[str, Any] = self.char_tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if text is None:
return inputs
elif images is None:
return encodings
else:
A_ : Optional[int] = encodings['input_ids']
return inputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ , A_ : int = sequences
A_ : Union[str, Any] = char_preds.size(0 )
A_ , A_ : Union[str, Any] = self._decode_helper(lowercase , 'char' )
A_ , A_ : List[str] = self._decode_helper(lowercase , 'bpe' )
A_ , A_ : Optional[int] = self._decode_helper(lowercase , 'wp' )
A_ : Dict = []
A_ : Optional[int] = []
for i in range(lowercase ):
A_ : List[str] = [char_scores[i], bpe_scores[i], wp_scores[i]]
A_ : int = [char_strs[i], bpe_strs[i], wp_strs[i]]
A_ : Union[str, Any] = scores.index(max(lowercase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A_ : Dict = {}
A_ : str = final_strs
A_ : Union[str, Any] = final_scores
A_ : Optional[Any] = char_strs
A_ : Dict = bpe_strs
A_ : str = wp_strs
return out
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
if format == DecodeType.CHARACTER:
A_ : List[Any] = self.char_decode
A_ : List[Any] = 1
A_ : str = '[s]'
elif format == DecodeType.BPE:
A_ : List[Any] = self.bpe_decode
A_ : Optional[int] = 2
A_ : Tuple = '#'
elif format == DecodeType.WORDPIECE:
A_ : Optional[int] = self.wp_decode
A_ : Optional[int] = 1_0_2
A_ : List[Any] = '[SEP]'
else:
raise ValueError(F'''Format {format} is not supported.''' )
A_ , A_ : Dict = [], []
A_ : Any = pred_logits.size(0 )
A_ : Optional[int] = pred_logits.size(1 )
A_ , A_ : int = pred_logits.topk(1 , dim=-1 , largest=lowercase , sorted=lowercase )
A_ : Dict = preds_index.view(-1 , lowercase )[:, 1:]
A_ : Any = decoder(lowercase )
A_ , A_ : List[Any] = torch.nn.functional.softmax(lowercase , dim=2 ).max(dim=2 )
A_ : List[str] = preds_max_prob[:, 1:]
for index in range(lowercase ):
A_ : int = preds_str[index].find(lowercase )
A_ : Union[str, Any] = preds_str[index][:pred_eos]
A_ : Dict = preds_index[index].cpu().tolist()
A_ : List[str] = pred_index.index(lowercase ) if eos_token in pred_index else -1
A_ : List[str] = preds_max_prob[index][: pred_eos_index + 1]
A_ : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowercase )
conf_scores.append(lowercase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowercase )]
return decode_strs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowercase )]
return decode_strs
| 140
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_snake_case : Any = StableDiffusionInstructPixaPixPipeline
_snake_case : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_snake_case : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(__lowerCAmelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_UpperCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any]=0 ):
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" )
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = """french fries"""
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = [inputs["""prompt"""]] * 2
_UpperCAmelCase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
_UpperCAmelCase = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.permute(0 , 3 , 1 , 2 )
_UpperCAmelCase = image.repeat(2 , 1 , 1 , 1 )
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_UpperCAmelCase = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = [round(__lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(__lowerCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase )
_UpperCAmelCase = VaeImageProcessor(do_resize=__lowerCAmelCase , do_normalize=__lowerCAmelCase )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = pipe(**self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type="""pt""" ) )[0]
_UpperCAmelCase = components["""vae"""]
_UpperCAmelCase = self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_UpperCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
_UpperCAmelCase = pipe(**__lowerCAmelCase )[0]
_UpperCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(__lowerCAmelCase , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str]=0 ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
_UpperCAmelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase )
_UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = 0
def callback_fn(__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor ) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCAmelCase = False
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
pipe(**__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase_ ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**__lowerCAmelCase )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase = inputs["""image"""].resize((504, 504) )
_UpperCAmelCase = """timbrooks/instruct-pix2pix"""
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = pipe(**__lowerCAmelCase )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_UpperCAmelCase = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "tokenizer"]
__UpperCamelCase = "Pix2StructImageProcessor"
__UpperCamelCase = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Any , lowercase_ : Dict , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = False
super().__init__(lowercase_ , lowercase_)
def __call__( self : Dict , lowercase_ : Optional[int]=None , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = 2048 , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''')
# Get only text
if images is None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
SCREAMING_SNAKE_CASE_ : Any = self.image_processor(
lowercase_ , return_tensors=lowercase_ , max_patches=lowercase_ , **lowercase_)
else:
# add pixel_values and bbox
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , max_patches=lowercase_ , header_text=lowercase_ , **lowercase_)
if text is not None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
if "attention_mask" in text_encoding:
SCREAMING_SNAKE_CASE_ : List[Any] = text_encoding.pop('''attention_mask''')
if "input_ids" in text_encoding:
SCREAMING_SNAKE_CASE_ : Dict = text_encoding.pop('''input_ids''')
else:
SCREAMING_SNAKE_CASE_ : str = None
if text_encoding is not None:
encoding_image_processor.update(lowercase_)
return encoding_image_processor
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : str , **lowercase_ : List[str]):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowercase_ : List[Any] , **lowercase_ : Any):
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 91
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _snake_case ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : nn.Convad ):
A__ , A__ = features.shape[-2:]
A__ , A__ = conv_layer.stride
A__ , A__ = conv_layer.kernel_size
if in_height % stride_height == 0:
A__ = max(kernel_height - stride_height , 0 )
else:
A__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
A__ = max(kernel_width - stride_width , 0 )
else:
A__ = max(kernel_width - (in_width % stride_width) , 0 )
A__ = pad_along_width // 2
A__ = pad_along_width - pad_left
A__ = pad_along_height // 2
A__ = pad_along_height - pad_top
A__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , """constant""" , 0.0 )
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
| 335
| 0
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCAmelCase__ : Union[str, Any] ={
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def _lowercase ( _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase ={}
state_dict.pop("""pixel_mean""" , _UpperCAmelCase )
state_dict.pop("""pixel_std""" , _UpperCAmelCase )
lowerCamelCase =r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase =key.replace(_UpperCAmelCase , _UpperCAmelCase )
if re.match(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =int(re.match(_UpperCAmelCase , _UpperCAmelCase ).group(2 ) )
if layer_nb == 0:
lowerCamelCase =key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
lowerCamelCase =key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
lowerCamelCase =key.replace("""layers.2""" , """proj_out""" )
lowerCamelCase =value
lowerCamelCase =model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="ybelkada/segment-anything" ) -> List[Any]:
lowerCamelCase =hf_hub_download(_UpperCAmelCase , F"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
lowerCamelCase =SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase =SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCamelCase =SamConfig(
vision_config=_UpperCAmelCase , )
elif "sam_vit_h" in model_name:
lowerCamelCase =SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCamelCase =SamConfig(
vision_config=_UpperCAmelCase , )
lowerCamelCase =torch.load(_UpperCAmelCase , map_location="""cpu""" )
lowerCamelCase =replace_keys(_UpperCAmelCase )
lowerCamelCase =SamImageProcessor()
lowerCamelCase =SamProcessor(image_processor=_UpperCAmelCase )
lowerCamelCase =SamModel(_UpperCAmelCase )
hf_model.load_state_dict(_UpperCAmelCase )
lowerCamelCase =hf_model.to("""cuda""" )
lowerCamelCase ="""https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
lowerCamelCase =Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("""RGB""" )
lowerCamelCase =[[[4_00, 6_50]]]
lowerCamelCase =[[1]]
lowerCamelCase =processor(images=np.array(_UpperCAmelCase ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowerCamelCase =hf_model(**_UpperCAmelCase )
lowerCamelCase =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
lowerCamelCase =processor(
images=np.array(_UpperCAmelCase ) , input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowerCamelCase =hf_model(**_UpperCAmelCase )
lowerCamelCase =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
lowerCamelCase =((75, 2_75, 17_25, 8_50),)
lowerCamelCase =processor(images=np.array(_UpperCAmelCase ) , input_boxes=_UpperCAmelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowerCamelCase =hf_model(**_UpperCAmelCase )
lowerCamelCase =output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
lowerCamelCase =[[[4_00, 6_50], [8_00, 6_50]]]
lowerCamelCase =[[1, 1]]
lowerCamelCase =processor(
images=np.array(_UpperCAmelCase ) , input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowerCamelCase =hf_model(**_UpperCAmelCase )
lowerCamelCase =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
UpperCAmelCase__ : Any =argparse.ArgumentParser()
UpperCAmelCase__ : Dict =['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
UpperCAmelCase__ : Optional[int] =parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 262
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 262
| 1
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _a ( a :Optional[Any] , a :int , a :List[str] , a :List[str] ) -> Tuple:
a = s.rsplit(a , a )
return new.join(a )
def _a ( a :Any ) -> List[Any]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def _a ( a :Any ) -> List[Any]:
a = {}
a = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
a = key.replace(F"""{group_key}.""" , F"""{group_key}.group.""" )
if "res_path" in key:
a = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
a = rreplace(a , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
a = rreplace(a , '''.b''' , '''.bias''' , 1 )
a = value.float()
return upgrade
@torch.no_grad()
def _a ( a :Union[str, Any] , a :Optional[Any] , a :Optional[int]=None , a :str=True ) -> Tuple:
from dall_e import Encoder
a = Encoder()
if os.path.exists(a ):
a = torch.load(a )
else:
a = torch.hub.load_state_dict_from_url(a )
if isinstance(a , a ):
a = ckpt.state_dict()
encoder.load_state_dict(a )
if config_path is not None:
a = FlavaImageCodebookConfig.from_pretrained(a )
else:
a = FlavaImageCodebookConfig()
a = FlavaImageCodebook(a ).eval()
a = encoder.state_dict()
a = upgrade_state_dict(a )
hf_model.load_state_dict(a )
a = hf_model.state_dict()
a = count_parameters(a )
a = count_parameters(a )
assert torch.allclose(a , a , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(a )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCAmelCase__ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a__: List[Any] = logging.getLogger()
def UpperCamelCase__( )->Union[str, Any]:
A__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
A__ = parser.parse_args()
return args.f
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def UpperCamelCase ( self ):
A__ = logging.StreamHandler(sys.stdout )
logger.addHandler(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0,'''run_glue_deebert.py''' )
with patch.object(__lowerCamelCase,'''argv''',__lowerCamelCase ):
A__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__lowerCamelCase,0.666 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase ( self ):
A__ = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(__lowerCamelCase )
A__ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__lowerCamelCase )
A__ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__lowerCamelCase )
| 193
| 0
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 368
|
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def snake_case_ ( lowerCAmelCase_ : int = 5000 ):
__lowercase : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase_ )]
for i, pentagonal_i in enumerate(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
__lowercase : int = pentagonal_nums[j]
__lowercase : Optional[int] = pentagonal_i + pentagonal_j
__lowercase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase_ ) and is_pentagonal(lowerCAmelCase_ ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 306
| 0
|
# Lint as: python3
import itertools
import os
import re
UpperCAmelCase_ = re.compile(r'([A-Z]+)([A-Z][a-z])')
UpperCAmelCase_ = re.compile(r'([a-z\d])([A-Z])')
UpperCAmelCase_ = re.compile(r'(?<!_)_(?!_)')
UpperCAmelCase_ = re.compile(r'(_{2,})')
UpperCAmelCase_ = r'^\w+(\.\w+)*$'
UpperCAmelCase_ = r'<>:/\|?*'
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = _uppercase_uppercase_re.sub(R"""\1_\2""" , A__ )
__lowerCamelCase = _lowercase_uppercase_re.sub(R"""\1_\2""" , A__ )
return name.lower()
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = _single_underscore_re.split(A__ )
__lowerCamelCase = [_multiple_underscores_re.split(A__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(A__ ) if n != """""" )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
if os.path.basename(A__ ) != name:
raise ValueError(f'Should be a dataset name, not a path: {name}' )
return camelcase_to_snakecase(A__ )
def lowerCamelCase__ ( A__ : Dict , A__ : Dict ):
'''simple docstring'''
if os.path.basename(A__ ) != name:
raise ValueError(f'Should be a dataset name, not a path: {name}' )
if not re.match(_split_re , A__ ):
raise ValueError(f'Split name should match \'{_split_re}\'\' but got \'{split}\'.' )
return f'{filename_prefix_for_name(A__ )}-{split}'
def lowerCamelCase__ ( A__ : int , A__ : Tuple , A__ : Optional[Any] , A__ : str=None ):
'''simple docstring'''
__lowerCamelCase = filename_prefix_for_split(A__ , A__ )
if filetype_suffix:
prefix += f'.{filetype_suffix}'
__lowerCamelCase = os.path.join(A__ , A__ )
return f'{filepath}*'
def lowerCamelCase__ ( A__ : List[str] , A__ : List[Any] , A__ : int , A__ : int=None , A__ : Any=None ):
'''simple docstring'''
__lowerCamelCase = filename_prefix_for_split(A__ , A__ )
__lowerCamelCase = os.path.join(A__ , A__ )
if shard_lengths:
__lowerCamelCase = len(A__ )
__lowerCamelCase = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(A__ )]
if filetype_suffix:
__lowerCamelCase = [filename + f'.{filetype_suffix}' for filename in filenames]
return filenames
else:
__lowerCamelCase = prefix
if filetype_suffix:
filename += f'.{filetype_suffix}'
return [filename]
| 12
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = ['pixel_values']
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
__lowerCamelCase = pad_size
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ):
__lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ )
__lowerCamelCase = (old_height // size + 1) * size - old_height
__lowerCamelCase = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_pad if do_pad is not None else self.do_pad
__lowerCamelCase = pad_size if pad_size is not None else self.pad_size
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_pad:
__lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 12
| 1
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCamelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None) -> Union[str, Any]:
snake_case_ = None
snake_case_ = os.path.abspath(os.path.join('examples', 'by_feature'))
snake_case_ = os.path.abspath('examples')
for item in os.listdir(lowerCAmelCase__):
if item not in EXCLUDE_EXAMPLES:
snake_case_ = os.path.join(lowerCAmelCase__, lowerCAmelCase__)
if os.path.isfile(lowerCAmelCase__) and ".py" in item_path:
with self.subTest(
tested_script=lowerCAmelCase__, feature_script=lowerCAmelCase__, tested_section='main()' if parser_only else 'training_function()', ):
snake_case_ = compare_against_test(
os.path.join(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = '\n'.join(lowerCAmelCase__)
if special_strings is not None:
for string in special_strings:
snake_case_ = diff.replace(lowerCAmelCase__, '')
self.assertEqual(lowerCAmelCase__, '')
def a_ ( self) -> Optional[Any]:
self.one_complete_example('complete_nlp_example.py', lowerCAmelCase__)
self.one_complete_example('complete_nlp_example.py', lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = os.path.abspath(os.path.join('examples', 'cv_example.py'))
snake_case_ = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py', lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
self.one_complete_example('complete_cv_example.py', lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = False
@classmethod
def a_ ( cls) -> List[Any]:
super().setUpClass()
snake_case_ = tempfile.mkdtemp()
snake_case_ = os.path.join(cls._tmpdir, 'default_config.yml')
write_basic_config(save_location=cls.configPath)
snake_case_ = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def a_ ( cls) -> List[str]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def a_ ( self) -> int:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))
def a_ ( self) -> Optional[int]:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
snake_case_ = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))
def a_ ( self) -> List[Any]:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
snake_case_ = run_command(self._launch_args + testargs, return_stdout=lowerCAmelCase__)
self.assertNotIn('epoch 0:', lowerCAmelCase__)
self.assertIn('epoch 1:', lowerCAmelCase__)
def a_ ( self) -> List[Any]:
snake_case_ = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
snake_case_ = run_command(self._launch_args + testargs, return_stdout=lowerCAmelCase__)
if torch.cuda.is_available():
snake_case_ = torch.cuda.device_count()
else:
snake_case_ = 1
if num_processes > 1:
self.assertNotIn('epoch 0:', lowerCAmelCase__)
self.assertIn('epoch 1:', lowerCAmelCase__)
else:
self.assertIn('epoch 0:', lowerCAmelCase__)
self.assertIn('epoch 1:', lowerCAmelCase__)
@slow
def a_ ( self) -> int:
snake_case_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
snake_case_ = run_command(self._launch_args + testargs, return_stdout=lowerCAmelCase__)
snake_case_ = re.findall('({.+})', lowerCAmelCase__)
snake_case_ = [r for r in results if 'accuracy' in r][-1]
snake_case_ = ast.literal_eval(lowerCAmelCase__)
self.assertGreaterEqual(results['accuracy'], 0.75)
def a_ ( self) -> int:
snake_case_ = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
def a_ ( self) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
snake_case_ = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'tracking')))
def a_ ( self) -> Optional[Any]:
snake_case_ = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs)
def a_ ( self) -> Optional[int]:
snake_case_ = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs)
| 360
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 312
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """num_attention_heads""" ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=6_40 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__="silu" , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=None , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = last_hidden_size
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = conv_kernel_size
SCREAMING_SNAKE_CASE__ : int = output_stride
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : str = num_labels
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : str = scope
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ (self ) -> str:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = MobileViTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Dict = MobileViTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MobileViTForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Dict = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : List[str] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : str = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Any = False
__UpperCamelCase : int = False
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = MobileViTConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ (self ) -> int:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : str = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE__ : Tuple = 2
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileViTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Tuple = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=SCREAMING_SNAKE_CASE__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
| 25
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ : int = Accelerator()
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 25
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json'}
_a = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_a = {'mgp-str': 27}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ):
"""simple docstring"""
super().__init__(
unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
for s in text:
char_tokens.extend(lowercase_ )
return char_tokens
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
return (vocab_file,)
| 23
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __a ( __lowerCamelCase ):
return x[0]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase )
UpperCAmelCase_ : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase )
UpperCAmelCase_ : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase )
UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] )
UpperCAmelCase_ : str = list(freq_to_letter_str.items() )
freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase )
UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(__lowerCamelCase )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase )
UpperCAmelCase_ : int = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23
| 1
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def _a ( _SCREAMING_SNAKE_CASE = "dhaka" , _SCREAMING_SNAKE_CASE = 5 ) -> Optional[int]:
snake_case_ = min(UpperCamelCase__ , 50 ) # Prevent abuse!
snake_case_ = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
snake_case_ = requests.get("""https://www.google.com/search""" , params=UpperCamelCase__ , headers=UpperCamelCase__ )
snake_case_ = BeautifulSoup(html.text , """html.parser""" )
snake_case_ = """""".join(
re.findall(r"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
snake_case_ = json.dumps(UpperCamelCase__ )
snake_case_ = json.loads(UpperCamelCase__ )
snake_case_ = re.findall(
r"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , UpperCamelCase__ , )
if not matched_google_image_data:
return 0
snake_case_ = re.sub(
r"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(UpperCamelCase__ ) , )
snake_case_ = re.findall(
r"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , UpperCamelCase__ , )
for index, fixed_full_res_image in enumerate(UpperCamelCase__ ):
if index >= max_images:
return index
snake_case_ = bytes(UpperCamelCase__ , """ascii""" ).decode(
"""unicode-escape""" )
snake_case_ = bytes(UpperCamelCase__ , """ascii""" ).decode(
"""unicode-escape""" )
snake_case_ = urllib.request.build_opener()
snake_case_ = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(UpperCamelCase__ )
snake_case_ = f"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
urllib.request.urlretrieve( # noqa: S310
UpperCamelCase__ , f"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__SCREAMING_SNAKE_CASE : int = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 347
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 294
| 0
|
import baseaa
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
return baseaa.aaaencode(string.encode('utf-8' ) )
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return baseaa.aaadecode(lowerCAmelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 260
| 0
|
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str ) -> list:
__snake_case = len(snake_case_ )
__snake_case = []
for i in range(len(snake_case_ ) - pat_len + 1 ):
__snake_case = True
for j in range(snake_case_ ):
if s[i + j] != pattern[j]:
__snake_case = False
break
if match_found:
position.append(snake_case_ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 24
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str:
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) )
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
__snake_case = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__snake_case = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case = train_result.metrics
__snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case = data_args.n_val
__snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__snake_case = test_output.metrics
__snake_case = data_args.n_test
if trainer.is_world_process_zero():
__snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__snake_case = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 24
| 1
|
"""simple docstring"""
from functools import reduce
lowerCamelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowercase__ ( lowercase_ = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowercase_ ,lowercase_ : str(int(lowercase_ ) * int(lowercase_ ) ) ,n[i : i + 13] ) )
for i in range(len(lowercase_ ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310
|
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__a , self ).__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
| 310
| 1
|
"""simple docstring"""
import baseaa
def _lowerCAmelCase ( UpperCamelCase_ ):
return baseaa.baaencode(string.encode("""utf-8""" ) )
def _lowerCAmelCase ( UpperCamelCase_ ):
return baseaa.baadecode(UpperCamelCase_ ).decode("""utf-8""" )
if __name__ == "__main__":
__magic_name__ = "Hello World!"
__magic_name__ = baseaa_encode(test)
print(encoded)
__magic_name__ = baseaa_decode(encoded)
print(decoded)
| 100
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Optional[Any] = XLNetTokenizer
__lowercase : List[str] = XLNetTokenizerFast
__lowercase : List[Any] = True
__lowercase : int = True
def snake_case_ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """<s>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """<eod>""")
self.assertEqual(len(lowerCAmelCase__) , 1_0_0_6)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 100
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
while second != 0:
UpperCAmelCase_ = first & second
first ^= second
UpperCAmelCase_ = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_: Optional[Any] =int(input('Enter the first number: ').strip())
SCREAMING_SNAKE_CASE_: Any =int(input('Enter the second number: ').strip())
print(f"{add(first, second) = }")
| 106
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[str] ={'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
SCREAMING_SNAKE_CASE_: Optional[int] ={
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(snake_case_ ):
UpperCAmelCase_ = b
UpperCAmelCase_ = idx
for wd in b:
UpperCAmelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __A ( UpperCamelCase__ ):
a__ : List[str] = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__(self : Any , __a : List[Any] , __a : Dict , __a : int="<|endoftext|>" , __a : Union[str, Any]="<|endoftext|>" , __a : int="<|startoftext|>" , __a : Tuple="<|endoftext|>" , __a : Optional[int]=False , **__a : int , ):
super().__init__(
unk_token=__a , pad_token=__a , bos_token=__a , eos_token=__a , do_clean_text=__a , **__a , )
if not os.path.isfile(__a ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__a ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(__a , __a )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _lowercase (self : Optional[Any] ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _lowercase (self : List[Any] ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _lowercase (self : List[Any] , __a : int ):
return self.subword_tokenizer.tokenize(__a , clean=self.do_clean_text )
def _lowercase (self : List[Any] , __a : List[str] ):
return self.vocab.get(__a , self.vocab.get(self.unk_token ) )
def _lowercase (self : int , __a : List[Any] ):
return self.subword_tokenizer.convert_id_to_token(__a )
def _lowercase (self : Dict , __a : str ):
UpperCAmelCase_ = "".join(__a ).strip()
return out_string
def _lowercase (self : int , __a : "Conversation" ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def _lowercase (self : int , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = 0
if os.path.isdir(__a ):
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__a , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(__a ) + "\n" )
index += 1
with open(__a , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __a )
return vocab_file, emoji_file
class __A ( UpperCamelCase__ ):
def __init__(self : List[Any] , __a : Dict , __a : Any , __a : int ):
UpperCAmelCase_ = vocab # same as swe
UpperCAmelCase_ = ids_to_tokens # same as bpe
UpperCAmelCase_ = emoji
UpperCAmelCase_ = np.max([len(__a ) for w in self.vocab.keys()] )
UpperCAmelCase_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__(self : Dict ):
return len(self.ids_to_tokens )
def _lowercase (self : str , __a : Union[str, Any] ):
UpperCAmelCase_ = self.content_repattera.sub("<URL>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<EMAIL>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<TEL>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<PRICE>" , __a )
UpperCAmelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : str=False ):
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(__a , __a )
if clean:
UpperCAmelCase_ = self.clean_text(__a )
def check_simbol(__a : List[Any] ):
UpperCAmelCase_ = x.encode()
if len(__a ) == 1 and len(__a ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2_a1 and c <= 0Xc2_bf)
or (c >= 0Xc7_80 and c <= 0Xc7_83)
or (c >= 0Xca_b9 and c <= 0Xcb_bf)
or (c >= 0Xcc_80 and c <= 0Xcd_a2)
):
return True
return False
def checkuae(__a : Tuple ):
UpperCAmelCase_ = x.encode()
if len(__a ) == 1 and len(__a ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(__a ):
UpperCAmelCase_ = min(len(__a ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(__a , __a , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__a ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__a ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(__a , key=lambda __a : x[0] )[0]
result.append(__a )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(__a ):
result.append("<KIGOU>" )
elif checkuae(__a ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def _lowercase (self : int , __a : Optional[Any] , __a : Optional[int]="\n" ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__a ) > 0:
words.append(bytearray(__a ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__a )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__a )
if len(__a ) > 0:
words.append(bytearray(__a ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(__a )
return text
| 106
| 1
|
def UpperCamelCase (lowercase_: str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 192
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Optional[int] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 192
| 1
|
from collections import defaultdict
class _A :
def __init__( self : str , _A : Optional[int] , _A : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase : Dict = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
lowercase : Optional[int] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(_A ) )
]
lowercase : Optional[int] = defaultdict(_A ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
lowercase : Tuple = (1 << len(_A )) - 1
def __a ( self : int , _A : List[str] , _A : Any ) -> str:
"""simple docstring"""
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
lowercase : List[Any] = self.count_ways_until(_A , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
lowercase : Union[str, Any] = total_ways_util
return self.dp[mask][task_no]
def __a ( self : int , _A : Tuple ) -> str:
"""simple docstring"""
for i in range(len(_A ) ):
for j in task_performed[i]:
self.task[j].append(_A )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
lowerCAmelCase_ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowerCAmelCase_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 116
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : int = '''maskformer'''
_UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''mask_feature_size'''}
_UpperCamelCase : Dict = ['''resnet''', '''swin''']
_UpperCamelCase : Optional[int] = ['''detr''']
def __init__( self : Any , _A : int = 256 , _A : int = 256 , _A : float = 0.1 , _A : bool = False , _A : Optional[Dict] = None , _A : Optional[Dict] = None , _A : float = 0.02 , _A : float = 1.0 , _A : float = 1.0 , _A : float = 1.0 , _A : float = 20.0 , _A : Optional[bool] = None , **_A : str , ) -> str:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase : List[str] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_A , _A ):
lowercase : Optional[int] = backbone_config.pop('''model_type''' )
lowercase : List[str] = CONFIG_MAPPING[backbone_model_type]
lowercase : Union[str, Any] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase : Any = DetrConfig()
else:
# verify that the decoder is supported
lowercase : Union[str, Any] = (
decoder_config.pop('''model_type''' ) if isinstance(_A , _A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {','.join(self.decoders_supported )}""" )
if isinstance(_A , _A ):
lowercase : str = CONFIG_MAPPING[decoder_type]
lowercase : Dict = config_class.from_dict(_A )
lowercase : Tuple = backbone_config
lowercase : List[Any] = decoder_config
# main feature dimension for the model
lowercase : Optional[int] = fpn_feature_size
lowercase : List[Any] = mask_feature_size
# initializer
lowercase : Union[str, Any] = init_std
lowercase : Tuple = init_xavier_std
# Hungarian matcher && loss
lowercase : List[str] = cross_entropy_weight
lowercase : int = dice_weight
lowercase : List[Any] = mask_weight
lowercase : Tuple = use_auxiliary_loss
lowercase : Tuple = no_object_weight
lowercase : int = output_auxiliary_logits
lowercase : List[Any] = self.decoder_config.encoder_attention_heads
lowercase : List[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_A )
@classmethod
def __a ( cls : Optional[int] , _A : PretrainedConfig , _A : PretrainedConfig , **_A : str ) -> Any:
"""simple docstring"""
return cls(
backbone_config=_A , decoder_config=_A , **_A , )
def __a ( self : Optional[int] ) -> Dict[str, any]:
"""simple docstring"""
lowercase : str = copy.deepcopy(self.__dict__ )
lowercase : Optional[Any] = self.backbone_config.to_dict()
lowercase : List[Any] = self.decoder_config.to_dict()
lowercase : Optional[int] = self.__class__.model_type
return output
| 116
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : int ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=__snake_case , )
assert hasattr(self , """env""" )
def lowercase__ ( self : Optional[int] , __snake_case : List[str]=1 ) -> Optional[Any]:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def lowercase__ ( self : Any , __snake_case : Optional[int] ) -> Optional[Any]:
TrainingJobAnalytics(__snake_case ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
def lowercase__ ( self : int ) -> Any:
# create estimator
_lowerCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __snake_case )
| 70
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
snake_case : int = False
@skip_mps
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase__( cls :int ) -> int:
super().setUpClass()
torch.use_deterministic_algorithms(__snake_case )
@classmethod
def lowerCamelCase__( cls :Optional[Any] ) -> int:
super().tearDownClass()
torch.use_deterministic_algorithms(__snake_case )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=__snake_case ,)
a__ = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=__snake_case ,set_alpha_to_one=__snake_case ,)
torch.manual_seed(0 )
a__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
a__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act='gelu' ,projection_dim=5_12 ,)
a__ = CLIPTextModel(__snake_case )
a__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase__( self :Any ,__snake_case :Union[str, Any] ,__snake_case :Tuple=0 ) -> Any:
if str(__snake_case ).startswith('mps' ):
a__ = torch.manual_seed(__snake_case )
else:
a__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a__ = a__ = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def lowerCamelCase__( self :Optional[int] ) -> Any:
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a__ = self.get_dummy_inputs(__snake_case )
a__ = pipe(**__snake_case ).images
a__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 64, 64, 3) )
a__ = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
a__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case ,1E-3 )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def lowerCamelCase__( self :Dict ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
self._test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=7E-4 )
def lowerCamelCase__( self :Dict ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase__( self :Dict ) -> Union[str, Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
super().test_save_load_local(expected_max_difference=5E-4 )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class snake_case_ (unittest.TestCase ):
@classmethod
def lowerCamelCase__( cls :Optional[Any] ) -> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__snake_case )
@classmethod
def lowerCamelCase__( cls :Optional[int] ) -> Tuple:
super().tearDownClass()
torch.use_deterministic_algorithms(__snake_case )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
a__ = torch.manual_seed(51 )
a__ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,safety_checker=__snake_case ,torch_dtype=torch.floataa )
pipe.to('cuda' )
a__ = 'a painting of an elephant with glasses'
a__ = [5, 7]
a__ = pipe(
prompt=__snake_case ,token_indices=__snake_case ,guidance_scale=7.5 ,generator=__snake_case ,num_inference_steps=5 ,max_iter_to_alter=5 ,output_type='numpy' ,).images[0]
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 240
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def lowerCamelCase__ ( A__ : np.ndarray ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int ):
'''simple docstring'''
__lowerCamelCase = np.nan
for i in range(A__ ):
__lowerCamelCase = features[:, labels == i]
__lowerCamelCase = data.mean(1 )
# Centralize the data of class i
__lowerCamelCase = data - column_reshape(A__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(A__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowerCamelCase = np.dot(A__ , centered_data.T )
return covariance_sum / features.shape[1]
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int ):
'''simple docstring'''
__lowerCamelCase = features.mean(1 )
__lowerCamelCase = np.nan
for i in range(A__ ):
__lowerCamelCase = features[:, labels == i]
__lowerCamelCase = data.shape[1]
__lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowerCamelCase = device_data * np.dot(
column_reshape(A__ ) - column_reshape(A__ ) , (column_reshape(A__ ) - column_reshape(A__ )).T , )
return covariance_sum / features.shape[1]
def lowerCamelCase__ ( A__ : np.ndarray , A__ : int ):
'''simple docstring'''
if features.any():
__lowerCamelCase = features.mean(1 )
# Center the dataset
__lowerCamelCase = features - np.reshape(A__ , (data_mean.size, 1) )
__lowerCamelCase = np.dot(A__ , centered_data.T ) / features.shape[1]
__lowerCamelCase, __lowerCamelCase = np.linalg.eigh(A__ )
# Take all the columns in the reverse order (-1), and then takes only the first
__lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
__lowerCamelCase = np.dot(filtered_eigenvectors.T , A__ )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=A__ )
logging.error("""Dataset empty""" )
raise AssertionError
def lowerCamelCase__ ( A__ : np.ndarray , A__ : np.ndarray , A__ : int , A__ : int ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
__lowerCamelCase, __lowerCamelCase = eigh(
covariance_between_classes(A__ , A__ , A__ ) , covariance_within_classes(A__ , A__ , A__ ) , )
__lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = np.linalg.svd(A__ )
__lowerCamelCase = svd_matrix[:, 0:dimensions]
__lowerCamelCase = np.dot(filtered_svd_matrix.T , A__ )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=A__ )
logging.error("""Dataset empty""" )
raise AssertionError
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
__lowerCamelCase = np.array([0, 0, 0, 1, 1] )
__lowerCamelCase = 2
__lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(A__ ) as error_info:
__lowerCamelCase = linear_discriminant_analysis(
A__ , A__ , A__ , A__ )
if isinstance(A__ , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
__lowerCamelCase = 2
__lowerCamelCase = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(A__ ) as error_info:
__lowerCamelCase = principal_component_analysis(A__ , A__ )
if not np.allclose(A__ , A__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 1
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
a : str = logging.get_logger(__name__)
def lowerCAmelCase_ (lowerCAmelCase__: str , lowerCAmelCase__: List[str] ):
"""simple docstring"""
try:
with open(lowerCAmelCase__ , """rb""" ) as flax_state_f:
UpperCAmelCase_: Union[str, Any] = from_bytes(lowerCAmelCase__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(lowerCAmelCase__ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(lowerCAmelCase__ , lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: str , lowerCAmelCase__: Union[str, Any] ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
UpperCAmelCase_: int = flatten_dict(jax.tree_util.tree_map(lambda lowerCAmelCase__ : x.dtype == jnp.bfloataa , lowerCAmelCase__ ) ).values()
if any(lowerCAmelCase__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
UpperCAmelCase_: List[Any] = jax.tree_util.tree_map(
lambda lowerCAmelCase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCAmelCase__ )
UpperCAmelCase_: Dict = """"""
UpperCAmelCase_: Dict = flatten_dict(lowerCAmelCase__ , sep=""".""" )
UpperCAmelCase_: str = pt_model.state_dict()
# keep track of unexpected & missing keys
UpperCAmelCase_: str = []
UpperCAmelCase_: Optional[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase_: Union[str, Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
UpperCAmelCase_: int = flax_key_tuple_array[:-1] + ["""weight"""]
UpperCAmelCase_: Optional[int] = jnp.transpose(lowerCAmelCase__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
UpperCAmelCase_: Tuple = flax_key_tuple_array[:-1] + ["""weight"""]
UpperCAmelCase_: Any = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
UpperCAmelCase_: Dict = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowerCAmelCase__ ):
UpperCAmelCase_: List[Any] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
UpperCAmelCase_: str = """.""".join(lowerCAmelCase__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCAmelCase_: Any = np.asarray(lowerCAmelCase__ ) if not isinstance(lowerCAmelCase__ , np.ndarray ) else flax_tensor
UpperCAmelCase_: int = torch.from_numpy(lowerCAmelCase__ )
# remove from missing keys
missing_keys.remove(lowerCAmelCase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCAmelCase__ )
pt_model.load_state_dict(lowerCAmelCase__ )
# re-transform missing_keys to list
UpperCAmelCase_: Optional[int] = list(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(lowerCAmelCase__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
return pt_model
| 147
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_ = "cpu", SCREAMING_SNAKE_CASE_ = "openai/clip-vit-large-patch14" ) -> None:
UpperCAmelCase_: Optional[Any] = device
UpperCAmelCase_: Optional[Any] = CLIPTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCAmelCase_: Optional[Any] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCAmelCase_: Optional[Any] = torchvision.transforms.Normalize(self.image_mean, self.image_std )
UpperCAmelCase_: Tuple = torchvision.transforms.Resize(224 )
UpperCAmelCase_: Any = torchvision.transforms.CenterCrop(224 )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Dict = self.resize(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.center_crop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = self.normalize(SCREAMING_SNAKE_CASE_ )
return images
def __call__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: Dict = self.tokenizer(text=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = self.preprocess_img(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
def __init__(self, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.0_1, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="image", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, ) -> None:
super().__init__()
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: List[str] = device if device else get_device()
if vqgan:
UpperCAmelCase_: int = vqgan
else:
UpperCAmelCase_: Optional[Any] = load_vqgan(self.device, conf_path=SCREAMING_SNAKE_CASE_, ckpt_path=SCREAMING_SNAKE_CASE_ )
self.vqgan.eval()
if clip:
UpperCAmelCase_: List[str] = clip
else:
UpperCAmelCase_: Any = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
UpperCAmelCase_: Optional[int] = ProcessorGradientFlow(device=self.device )
UpperCAmelCase_: Optional[int] = iterations
UpperCAmelCase_: List[Any] = lr
UpperCAmelCase_: str = log
UpperCAmelCase_: Tuple = make_grid
UpperCAmelCase_: List[str] = return_val
UpperCAmelCase_: Dict = quantize
UpperCAmelCase_: int = self.vqgan.decoder.z_shape
def __snake_case (self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=True ) -> List[Any]:
UpperCAmelCase_: Tuple = []
if output_path is None:
UpperCAmelCase_: Optional[int] = """./animation.gif"""
if input_path is None:
UpperCAmelCase_: Tuple = self.save_path
UpperCAmelCase_: List[Any] = sorted(glob(input_path + """/*""" ) )
if not len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
UpperCAmelCase_: Dict = total_duration / len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = [frame_duration] * len(SCREAMING_SNAKE_CASE_ )
if extend_frames:
UpperCAmelCase_: List[str] = 1.5
UpperCAmelCase_: List[Any] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(SCREAMING_SNAKE_CASE_ ) )
imageio.mimsave(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, duration=SCREAMING_SNAKE_CASE_ )
print(f'gif saved to {output_path}' )
def __snake_case (self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
UpperCAmelCase_: List[Any] = preprocess(Image.open(SCREAMING_SNAKE_CASE_ ), target_image_size=256 ).to(self.device )
UpperCAmelCase_: Union[str, Any] = preprocess_vqgan(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ , *UpperCAmelCase_: str = self.vqgan.encode(SCREAMING_SNAKE_CASE_ )
return z
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = self.latent.detach().requires_grad_()
UpperCAmelCase_: Optional[int] = base_latent + transform_vector
if self.quantize:
UpperCAmelCase_ , *UpperCAmelCase_: Optional[Any] = self.vqgan.quantize(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: Tuple = trans_latent
return self.vqgan.decode(SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> List[str]:
UpperCAmelCase_: Any = self.clip_preprocessor(text=SCREAMING_SNAKE_CASE_, images=SCREAMING_SNAKE_CASE_, return_tensors="""pt""", padding=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = self.clip(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase_: Any = similarity_logits * weights
return similarity_logits.sum()
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCAmelCase_: Dict = self._get_clip_similarity(pos_prompts["""prompts"""], SCREAMING_SNAKE_CASE_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
UpperCAmelCase_: Tuple = self._get_clip_similarity(neg_prompts["""prompts"""], SCREAMING_SNAKE_CASE_, weights=neg_prompts["""weights"""] )
else:
UpperCAmelCase_: Any = torch.tensor([1], device=self.device )
UpperCAmelCase_: List[str] = -torch.log(SCREAMING_SNAKE_CASE_ ) + torch.log(SCREAMING_SNAKE_CASE_ )
return loss
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Tuple = torch.randn_like(self.latent, requires_grad=SCREAMING_SNAKE_CASE_, device=self.device )
UpperCAmelCase_: str = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase_: Optional[int] = self._add_vector(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = loop_post_process(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = self._get_CLIP_loss(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
print("""CLIP loss""", SCREAMING_SNAKE_CASE_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
wandb.init(reinit=SCREAMING_SNAKE_CASE_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
UpperCAmelCase_: str = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if not prompts:
return []
UpperCAmelCase_: Tuple = []
UpperCAmelCase_: str = []
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Optional[Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(SCREAMING_SNAKE_CASE_, (tuple, list) ):
UpperCAmelCase_: str = prompt[0]
UpperCAmelCase_: List[str] = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase_ , UpperCAmelCase_: int = prompt.split(""":""" )
UpperCAmelCase_: int = float(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: str = prompt
UpperCAmelCase_: Dict = 1.0
processed_prompts.append(SCREAMING_SNAKE_CASE_ )
weights.append(SCREAMING_SNAKE_CASE_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(SCREAMING_SNAKE_CASE_, device=self.device ),
}
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, ) -> Optional[Any]:
if image_path:
UpperCAmelCase_: Optional[int] = self._get_latent(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: str = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase_: List[Any] = self.process_prompts(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = self.process_prompts(SCREAMING_SNAKE_CASE_ )
if save_final and save_path is None:
UpperCAmelCase_: Optional[int] = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: List[str] = save_path + """_""" + get_timestamp()
os.makedirs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = save_path
UpperCAmelCase_: Optional[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Tuple = loop_post_process(SCREAMING_SNAKE_CASE_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ):
if show_intermediate:
show_pil(SCREAMING_SNAKE_CASE_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(SCREAMING_SNAKE_CASE_ )} )
if show_final:
show_pil(SCREAMING_SNAKE_CASE_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}_final.png' ) )
| 147
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="glpn"
def __init__( self , snake_case=3 , snake_case=4 , snake_case=[2, 2, 2, 2] , snake_case=[8, 4, 2, 1] , snake_case=[3_2, 6_4, 1_6_0, 2_5_6] , snake_case=[7, 3, 3, 3] , snake_case=[4, 2, 2, 2] , snake_case=[1, 2, 5, 8] , snake_case=[4, 4, 4, 4] , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=0.1 , snake_case=1E-6 , snake_case=6_4 , snake_case=1_0 , snake_case=-1 , **snake_case , ) -> Tuple:
'''simple docstring'''
super().__init__(**snake_case)
_UpperCAmelCase : Any =num_channels
_UpperCAmelCase : List[str] =num_encoder_blocks
_UpperCAmelCase : Optional[Any] =depths
_UpperCAmelCase : str =sr_ratios
_UpperCAmelCase : Dict =hidden_sizes
_UpperCAmelCase : List[str] =patch_sizes
_UpperCAmelCase : Any =strides
_UpperCAmelCase : List[str] =mlp_ratios
_UpperCAmelCase : Dict =num_attention_heads
_UpperCAmelCase : List[str] =hidden_act
_UpperCAmelCase : int =hidden_dropout_prob
_UpperCAmelCase : List[Any] =attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] =initializer_range
_UpperCAmelCase : Tuple =drop_path_rate
_UpperCAmelCase : str =layer_norm_eps
_UpperCAmelCase : Optional[int] =decoder_hidden_size
_UpperCAmelCase : List[str] =max_depth
_UpperCAmelCase : Dict =head_in_index
| 242
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Dict ='ZinengTang/tvlt-base'
_UpperCAmelCase : Dict =tempfile.mkdtemp()
def lowerCAmelCase ( self , **snake_case) -> Union[str, Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case)
def lowerCAmelCase ( self , **snake_case) -> Dict:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any =self.get_image_processor()
_UpperCAmelCase : Optional[Any] =self.get_feature_extractor()
_UpperCAmelCase : str =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
processor.save_pretrained(self.tmpdirname)
_UpperCAmelCase : str =TvltProcessor.from_pretrained(self.tmpdirname)
self.assertIsInstance(processor.feature_extractor , snake_case)
self.assertIsInstance(processor.image_processor , snake_case)
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str =self.get_image_processor()
_UpperCAmelCase : List[Any] =self.get_feature_extractor()
_UpperCAmelCase : str =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
_UpperCAmelCase : Optional[int] =np.ones([1_2_0_0_0])
_UpperCAmelCase : str =feature_extractor(snake_case , return_tensors='np')
_UpperCAmelCase : Dict =processor(audio=snake_case , return_tensors='np')
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict =self.get_image_processor()
_UpperCAmelCase : int =self.get_feature_extractor()
_UpperCAmelCase : int =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
_UpperCAmelCase : Union[str, Any] =np.ones([3, 2_2_4, 2_2_4])
_UpperCAmelCase : Optional[Any] =image_processor(snake_case , return_tensors='np')
_UpperCAmelCase : List[Any] =processor(images=snake_case , return_tensors='np')
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] =self.get_image_processor()
_UpperCAmelCase : Dict =self.get_feature_extractor()
_UpperCAmelCase : Optional[int] =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
_UpperCAmelCase : Optional[int] =np.ones([1_2_0_0_0])
_UpperCAmelCase : str =np.ones([3, 2_2_4, 2_2_4])
_UpperCAmelCase : Optional[int] =processor(audio=snake_case , images=snake_case)
self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
# test if it raises when no input is passed
with pytest.raises(snake_case):
processor()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =self.get_image_processor()
_UpperCAmelCase : Tuple =self.get_feature_extractor()
_UpperCAmelCase : Dict =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 242
| 1
|
"""simple docstring"""
import math
def UpperCAmelCase__ (snake_case__ : int = 1_00 ):
"""simple docstring"""
_snake_case : str = sum(i * i for i in range(1 , n + 1 ) )
_snake_case : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 64
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowerCAmelCase__ ( A_ ):
def __lt__( self : Any , _lowerCamelCase : int ):
return self[-1] < other[-1]
def __eq__( self : int , _lowerCamelCase : Optional[Any] ):
return self[-1] == other[-1]
def _UpperCAmelCase ( __lowerCamelCase : list ) -> list:
_snake_case = []
# sort into stacks
for element in collection:
_snake_case = Stack([element] )
_snake_case = bisect_left(__lowerCamelCase , __lowerCamelCase )
if i != len(__lowerCamelCase ):
stacks[i].append(__lowerCamelCase )
else:
stacks.append(__lowerCamelCase )
# use a heap-based merge to merge stack efficiently
_snake_case = merge(*(reversed(__lowerCamelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 288
| 0
|
def lowerCAmelCase_ ( _lowercase : int) -> bool:
"""simple docstring"""
if num < 0:
return False
a__ : int = num
a__ : int = 0
while num > 0:
a__ : Tuple = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ (A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :Union[str, Any] = ProphetNetTokenizer
__lowerCAmelCase :Any = False
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
a__ : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : Any = """UNwant\u00E9d,running"""
a__ : Dict = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Tuple = self.tokenizer_class(self.vocab_file )
a__ : int = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__lowercase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : str = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : int = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : str = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : List[str] = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[Any] = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : List[str] = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : str = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : List[str] = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = BasicTokenizer(do_lower_case=__lowercase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
a__ : Dict = {}
for i, token in enumerate(__lowercase ):
a__ : Optional[Any] = i
a__ : str = WordpieceTokenizer(vocab=__lowercase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : List[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
a__ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
a__ : Optional[Any] = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
a__ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors="""pt""" )
self.assertIsInstance(__lowercase , __lowercase )
a__ : Optional[int] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Optional[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
a__ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=__lowercase )
a__ : str = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__lowercase )
a__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__lowercase )
a__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 266
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCAmelCase = False
class A_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
lowercase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
lowercase = 12
lowercase = 12
lowercase = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
lowercase = TransformeraDModel(**snake_case )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'cpu'
lowercase = self.dummy_vqvae
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_transformer
lowercase = VQDiffusionScheduler(self.num_embed )
lowercase = LearnedClassifierFreeSamplingEmbeddings(learnable=snake_case )
lowercase = VQDiffusionPipeline(
vqvae=snake_case , text_encoder=snake_case , tokenizer=snake_case , transformer=snake_case , scheduler=snake_case , learned_classifier_free_sampling_embeddings=snake_case , )
lowercase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'teddy bear playing in the pool'
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = pipe([prompt] , generator=snake_case , num_inference_steps=2 , output_type='np' )
lowercase = output.images
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = pipe(
[prompt] , generator=snake_case , output_type='np' , return_dict=snake_case , num_inference_steps=2 )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'cpu'
lowercase = self.dummy_vqvae
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_transformer
lowercase = VQDiffusionScheduler(self.num_embed )
lowercase = LearnedClassifierFreeSamplingEmbeddings(
learnable=snake_case , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowercase = VQDiffusionPipeline(
vqvae=snake_case , text_encoder=snake_case , tokenizer=snake_case , transformer=snake_case , scheduler=snake_case , learned_classifier_free_sampling_embeddings=snake_case , )
lowercase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'teddy bear playing in the pool'
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = pipe([prompt] , generator=snake_case , num_inference_steps=2 , output_type='np' )
lowercase = output.images
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = pipe(
[prompt] , generator=snake_case , output_type='np' , return_dict=snake_case , num_inference_steps=2 )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
lowercase = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
lowercase = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
lowercase = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=snake_case , output_type='np' , )
lowercase = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 195
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase = '''Create a default config file for Accelerate with only a few flags set.'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE="no" , __SCREAMING_SNAKE_CASE = default_json_config_file , __SCREAMING_SNAKE_CASE = False ):
lowercase = Path(__SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowercase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowercase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowercase = torch.cuda.device_count()
lowercase = num_gpus
lowercase = False
if num_gpus > 1:
lowercase = 'MULTI_GPU'
else:
lowercase = 'NO'
elif is_xpu_available() and use_xpu:
lowercase = torch.xpu.device_count()
lowercase = num_xpus
lowercase = False
if num_xpus > 1:
lowercase = 'MULTI_XPU'
else:
lowercase = 'NO'
elif is_npu_available():
lowercase = torch.npu.device_count()
lowercase = num_npus
lowercase = False
if num_npus > 1:
lowercase = 'MULTI_NPU'
else:
lowercase = 'NO'
else:
lowercase = 0
lowercase = True
lowercase = 1
lowercase = 'NO'
lowercase = ClusterConfig(**__SCREAMING_SNAKE_CASE )
config.to_json_file(__SCREAMING_SNAKE_CASE )
return path
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = parser.add_parser('default' , parents=__SCREAMING_SNAKE_CASE , help=__SCREAMING_SNAKE_CASE , formatter_class=__SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=__SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=__SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
| 195
| 1
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a__: str = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=None,__lowerCamelCase=None ):
A__ = self.layer[current_layer](__lowerCamelCase,__lowerCamelCase,head_mask[current_layer] )
A__ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , UpperCamelCase__ , )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def __init__( self,__lowerCamelCase ):
super().__init__(__lowerCamelCase )
A__ = BertEncoderWithPabee(__lowerCamelCase )
self.init_weights()
A__ = 0
A__ = 0
A__ = 0
A__ = 0
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = threshold
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = patience
def UpperCamelCase ( self ):
A__ = 0
A__ = 0
def UpperCamelCase ( self ):
A__ = self.inference_layers_num / self.inference_instances_num
A__ = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(__lowerCamelCase )
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=False,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
A__ = input_ids.size()
elif inputs_embeds is not None:
A__ = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
A__ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
A__ = torch.ones(__lowerCamelCase,device=__lowerCamelCase )
if token_type_ids is None:
A__ = torch.zeros(__lowerCamelCase,dtype=torch.long,device=__lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
A__ = self.get_extended_attention_mask(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
A__ , A__ , A__ = encoder_hidden_states.size()
A__ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
A__ = torch.ones(__lowerCamelCase,device=__lowerCamelCase )
A__ = self.invert_attention_mask(__lowerCamelCase )
else:
A__ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
A__ = self.get_head_mask(__lowerCamelCase,self.config.num_hidden_layers )
A__ = self.embeddings(
input_ids=__lowerCamelCase,position_ids=__lowerCamelCase,token_type_ids=__lowerCamelCase,inputs_embeds=__lowerCamelCase )
A__ = embedding_output
if self.training:
A__ = []
for i in range(self.config.num_hidden_layers ):
A__ = self.encoder.adaptive_forward(
__lowerCamelCase,current_layer=__lowerCamelCase,attention_mask=__lowerCamelCase,head_mask=__lowerCamelCase )
A__ = self.pooler(__lowerCamelCase )
A__ = output_layers[i](output_dropout(__lowerCamelCase ) )
res.append(__lowerCamelCase )
elif self.patience == 0: # Use all layers for inference
A__ = self.encoder(
__lowerCamelCase,attention_mask=__lowerCamelCase,head_mask=__lowerCamelCase,encoder_hidden_states=__lowerCamelCase,encoder_attention_mask=__lowerCamelCase,)
A__ = self.pooler(encoder_outputs[0] )
A__ = [output_layers[self.config.num_hidden_layers - 1](__lowerCamelCase )]
else:
A__ = 0
A__ = None
A__ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
A__ = self.encoder.adaptive_forward(
__lowerCamelCase,current_layer=__lowerCamelCase,attention_mask=__lowerCamelCase,head_mask=__lowerCamelCase )
A__ = self.pooler(__lowerCamelCase )
A__ = output_layers[i](__lowerCamelCase )
if regression:
A__ = logits.detach()
if patient_result is not None:
A__ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
A__ = 0
else:
A__ = logits.detach().argmax(dim=1 )
if patient_result is not None:
A__ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__lowerCamelCase ) ):
patient_counter += 1
else:
A__ = 0
A__ = logits
if patient_counter == self.patience:
break
A__ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , UpperCamelCase__ , )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def __init__( self,__lowerCamelCase ):
super().__init__(__lowerCamelCase )
A__ = config.num_labels
A__ = BertModelWithPabee(__lowerCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.ModuleList(
[nn.Linear(config.hidden_size,self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,):
A__ = self.bert(
input_ids=__lowerCamelCase,attention_mask=__lowerCamelCase,token_type_ids=__lowerCamelCase,position_ids=__lowerCamelCase,head_mask=__lowerCamelCase,inputs_embeds=__lowerCamelCase,output_dropout=self.dropout,output_layers=self.classifiers,regression=self.num_labels == 1,)
A__ = (logits[-1],)
if labels is not None:
A__ = None
A__ = 0
for ix, logits_item in enumerate(__lowerCamelCase ):
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits_item.view(-1 ),labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits_item.view(-1,self.num_labels ),labels.view(-1 ) )
if total_loss is None:
A__ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
A__ = (total_loss / total_weights,) + outputs
return outputs
| 39
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a__: Union[str, Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = generator.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''',torch_dtype=torch.floataa )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=50,output_type='''numpy''' ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 39
| 1
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
snake_case_ : Optional[int] = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : List[str] = patch_size
snake_case_ : List[str] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : Any = use_labels
snake_case_ : Tuple = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[Any] = mask_ratio
snake_case_ : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ : Optional[int] = (image_size // patch_size) ** 2
snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Union[str, Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : str ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A )
snake_case_ : str = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = TFViTMAEForPreTraining(_A )
snake_case_ : Optional[Any] = model(_A , training=_A )
# expected sequence length = num_patches
snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2
snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ : str = 1
snake_case_ : Dict = TFViTMAEForPreTraining(_A )
snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : List[str] = model(_A , training=_A )
snake_case_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = self.prepare_config_and_inputs()
((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs
snake_case_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
__magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__magic_name__: Dict = False
__magic_name__: Dict = False
__magic_name__: List[Any] = False
__magic_name__: Dict = False
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = TFViTMAEModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_A )
snake_case_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Dict = [*signature.parameters.keys()]
snake_case_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
snake_case_ : List[str] = model(_A , noise=_A )
snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
snake_case_ : str = model(**_A , noise=_A )
snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_A : int ):
snake_case_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
snake_case_ : str = v.numpy()
else:
snake_case_ : Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_A )
snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
snake_case_ : Any = prepare_numpy_arrays(_A )
snake_case_ : List[Any] = model(_A , noise=_A )
snake_case_ : List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.constant(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Optional[Any] = tf_noise
super().check_pt_tf_models(_A , _A , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , '_keras_serializable' , _A )
}
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Optional[Any] = main_layer_class(_A )
snake_case_ : List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
snake_case_ : int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
model.save(_A )
snake_case_ : str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
snake_case_ : List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
snake_case_ : int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Any = outputs.last_hidden_state.numpy()
snake_case_ : Optional[int] = 0
else:
snake_case_ : str = outputs.logits.numpy()
snake_case_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
snake_case_ : Any = model_class.from_pretrained(_A )
snake_case_ : Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
snake_case_ : Dict = 0
else:
snake_case_ : Any = after_outputs['logits'].numpy()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
snake_case_ : int = self._prepare_for_class(_A , _A )
snake_case_ : str = model(_A , noise=_A )
snake_case_ : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
snake_case_ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : str = model_class.from_config(model.config )
snake_case_ : Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : Dict = prepare_img()
snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ : int = ViTMAEConfig()
snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ : Optional[Any] = model(**_A , noise=_A )
# verify the logits
snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _A )
snake_case_ : Any = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
| 327
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__magic_name__: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__magic_name__: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__magic_name__: Optional[str] = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} )
__magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__magic_name__: Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__: Optional[int] = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__: Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
__magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} )
__magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} )
__magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} )
__magic_name__: bool = field(
default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses()
check_output_dir(__a )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(__a , __a , __a ):
assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(__a , __a , getattr(__a , __a ) )
snake_case_ : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__a , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
snake_case_ : Any = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__a , __a ):
snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__a )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
snake_case_ : List[Any] = SeqaSeqDataset
# Get datasets
snake_case_ : List[Any] = (
dataset_class(
__a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
snake_case_ : List[str] = (
dataset_class(
__a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
snake_case_ : List[Any] = (
dataset_class(
__a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
snake_case_ : Any = (
build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None
)
snake_case_ : List[str] = SeqaSeqTrainer(
model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator(
__a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , )
snake_case_ : Optional[int] = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
snake_case_ : Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
snake_case_ : Tuple = train_result.metrics
snake_case_ : List[str] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , __a , training_args.output_dir )
all_metrics.update(__a )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' )
snake_case_ : str = data_args.n_val
snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , __a , training_args.output_dir )
all_metrics.update(__a )
if training_args.do_predict:
logger.info('*** Predict ***' )
snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' )
snake_case_ : Union[str, Any] = test_output.metrics
snake_case_ : int = data_args.n_test
if trainer.is_world_process_zero():
snake_case_ : List[str] = round(metrics['test_loss'] , 4 )
handle_metrics('test' , __a , training_args.output_dir )
all_metrics.update(__a )
if training_args.predict_with_generate:
snake_case_ : Any = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
snake_case_ : Any = lmap(str.strip , __a )
write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def SCREAMING_SNAKE_CASE__ ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 327
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class A__ :
def __init__( self : Any , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Any = data
lowerCAmelCase__ : Node | None = None
class A__ :
def __init__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[Any] = None
def __iter__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.head
while self.head:
yield node.data
lowerCAmelCase__ : Any = node.next
if node == self.head:
break
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ):
'''simple docstring'''
return "->".join(str(a ) for item in iter(self ) )
def _lowerCamelCase ( self : List[str] , a : Any ):
'''simple docstring'''
self.insert_nth(len(self ) , a )
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
self.insert_nth(0 , a )
def _lowerCamelCase ( self : str , a : int , a : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ : int = Node(a )
if self.head is None:
lowerCAmelCase__ : Optional[int] = new_node # first node points itself
lowerCAmelCase__ : Union[str, Any] = new_node
elif index == 0: # insert at head
lowerCAmelCase__ : Dict = self.head
lowerCAmelCase__ : str = new_node
else:
lowerCAmelCase__ : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : List[str] = temp.next
lowerCAmelCase__ : Tuple = temp.next
lowerCAmelCase__ : Optional[Any] = new_node
if index == len(self ) - 1: # insert at tail
lowerCAmelCase__ : Optional[Any] = new_node
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.delete_nth(0 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def _lowerCamelCase ( self : Optional[Any] , a : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ : str = self.head
if self.head == self.tail: # just one node
lowerCAmelCase__ : str = None
elif index == 0: # delete head node
lowerCAmelCase__ : int = self.tail.next.next
lowerCAmelCase__ : Tuple = self.head.next
else:
lowerCAmelCase__ : str = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : List[Any] = temp.next
lowerCAmelCase__ : List[str] = temp.next
lowerCAmelCase__ : Tuple = temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCAmelCase__ : str = temp
return delete_node.data
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ : Any = CircularLinkedList()
assert len(SCREAMING_SNAKE_CASE_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(SCREAMING_SNAKE_CASE_ ) == i
circular_linked_list.insert_nth(SCREAMING_SNAKE_CASE_ , i + 1 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
| 307
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCamelCase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowerCAmelCase_ ( __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = {}
with open(__A, "r" ) as file:
for line_number, line in enumerate(__A ):
UpperCAmelCase__ = line.strip()
if line:
UpperCAmelCase__ = line.split()
UpperCAmelCase__ = line_number
UpperCAmelCase__ = words[0]
UpperCAmelCase__ = value
return result
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = getattr(__A, __A ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = hf_pointer
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
UpperCAmelCase__ = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase__ = value[0]
else:
UpperCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = ".".join([key, hf_param_name] )
else:
UpperCAmelCase__ = key
UpperCAmelCase__ = value if "lm_head" in full_key else value[0]
UpperCamelCase__ = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(__A )[0].split("." )[-2]
UpperCAmelCase__ = mapped_key.replace("*", __A )
if "weight_g" in name:
UpperCAmelCase__ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ = "weight_v"
elif "bias" in name:
UpperCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = "weight"
else:
UpperCAmelCase__ = None
if hf_dict is not None:
rename_dict(__A, __A, __A, __A, __A )
else:
set_recursively(__A, __A, __A, __A, __A )
return is_used
return is_used
def lowerCAmelCase_ ( __A, __A, __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
__A, __A, __A, __A, hf_model.config.feat_extract_norm == "group", )
UpperCAmelCase__ = True
else:
UpperCAmelCase__ = load_wavaveca_layer(__A, __A, __A )
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ = name.split("." )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
@torch.no_grad()
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None, __A=True, __A=False ) -> Tuple:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ = WavaVecaConfig.from_pretrained(__A )
else:
UpperCAmelCase__ = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase__ = read_txt_into_dict(__A )
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = WavaVecaForSequenceClassification(__A )
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__A, return_attention_mask=__A, )
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
UpperCAmelCase__ = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ = target_dict.pad_index
UpperCAmelCase__ = target_dict.bos_index
UpperCAmelCase__ = target_dict.eos_index
UpperCAmelCase__ = len(target_dict.symbols )
UpperCAmelCase__ = os.path.join(__A, "vocab.json" )
if not os.path.isdir(__A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__A ) )
return
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
with open(__A, "w", encoding="utf-8" ) as vocab_handle:
json.dump(__A, __A )
UpperCAmelCase__ = WavaVecaCTCTokenizer(
__A, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=__A, )
UpperCAmelCase__ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__A, return_attention_mask=__A, )
UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=__A, tokenizer=__A )
processor.save_pretrained(__A )
UpperCAmelCase__ = WavaVecaForCTC(__A )
else:
UpperCAmelCase__ = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase__ = fairseq.tasks.setup_task(__A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=__A )
UpperCAmelCase__ = model[0].eval()
recursively_load_weights(__A, __A, not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 65
|
def A_ ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowercase__ : List[str] = generate_large_matrix()
lowercase__ : Tuple = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A_ ( snake_case : list[list[int]] ) -> None:
'''simple docstring'''
assert all(row == sorted(snake_case , reverse=snake_case ) for row in grid )
assert all(list(snake_case ) == sorted(snake_case , reverse=snake_case ) for col in zip(*snake_case ) )
def A_ ( snake_case : list[int] ) -> int:
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = len(snake_case ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCamelCase = (left + right) // 2
__UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCamelCase = mid + 1
else:
__UpperCamelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(snake_case )
def A_ ( snake_case : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCamelCase = 0
__UpperCamelCase = len(grid[0] )
for i in range(len(snake_case ) ):
__UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(snake_case ) * len(grid[0] )) - total
def A_ ( snake_case : list[list[int]] ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def A_ ( snake_case : list[list[int]] ) -> int:
'''simple docstring'''
__UpperCamelCase = 0
for row in grid:
for i, number in enumerate(snake_case ):
if number < 0:
total += len(snake_case ) - i
break
return total
def A_ ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCamelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCamelCase = timeit(f"{func}(grid=grid)" , setup=snake_case , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 328
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None # sigma(t_i)
@classmethod
def UpperCAmelCase_ ( cls ) -> int:
return cls()
@dataclass
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
class _lowerCAmelCase ( __A, __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return True
@register_to_config
def __init__( self , _lowerCamelCase = 0.02 , _lowerCamelCase = 100 , _lowerCamelCase = 1.007 , _lowerCamelCase = 80 , _lowerCamelCase = 0.05 , _lowerCamelCase = 50 , ) -> Any:
pass
def UpperCAmelCase_ ( self ) -> Optional[int]:
return KarrasVeSchedulerState.create()
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = () ) -> KarrasVeSchedulerState:
A_ : Optional[Any] = jnp.arange(0 , _lowerCamelCase )[::-1].copy()
A_ : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=_lowerCamelCase , schedule=jnp.array(_lowerCamelCase , dtype=jnp.floataa ) , timesteps=_lowerCamelCase , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
A_ : List[str] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
A_ : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
A_ : str = random.split(_lowerCamelCase , num=1 )
A_ : str = self.config.s_noise * random.normal(key=_lowerCamelCase , shape=sample.shape )
A_ : Union[str, Any] = sigma + gamma * sigma
A_ : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
A_ : Optional[int] = sample_hat + sigma_hat * model_output
A_ : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat
A_ : List[str] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_lowerCamelCase , derivative=_lowerCamelCase , state=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
A_ : Any = sample_prev + sigma_prev * model_output
A_ : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
A_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_lowerCamelCase , derivative=_lowerCamelCase , state=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
raise NotImplementedError()
| 164
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 164
| 1
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = -1
_UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase = TextStreamer(_SCREAMING_SNAKE_CASE )
model.generate(_SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=_SCREAMING_SNAKE_CASE , streamer=_SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase = cs.out[:-1]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase__ ( self : List[Any] )->Union[str, Any]:
_UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = -1
_UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase = TextIteratorStreamer(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase = Thread(target=model.generate , kwargs=_SCREAMING_SNAKE_CASE )
thread.start()
_UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase__ ( self : List[Any] )->Union[str, Any]:
_UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = -1
_UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase = TextStreamer(_SCREAMING_SNAKE_CASE , skip_prompt=_SCREAMING_SNAKE_CASE )
model.generate(_SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=_SCREAMING_SNAKE_CASE , streamer=_SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase = cs.out[:-1]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Tuple )->Any:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = -1
_UpperCAmelCase = torch.ones((1, 5) , device=_SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase = TextStreamer(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
model.generate(_SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=_SCREAMING_SNAKE_CASE , streamer=_SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowercase__ ( self : Dict )->Dict:
_UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = -1
_UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = TextIteratorStreamer(_SCREAMING_SNAKE_CASE , timeout=0.0_0_1 )
_UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase = Thread(target=model.generate , kwargs=_SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 260
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _a ( _lowercase):
_a : Any = VOCAB_FILES_NAMES
_a : List[str] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _a ( _lowercase):
_a : int = VOCAB_FILES_NAMES
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Any = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(_lowercase)
class _a :
def __call__( self : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Union[bool, str] = False , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , **_SCREAMING_SNAKE_CASE : str , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
elif titles is None or texts is None:
lowerCAmelCase__ : Tuple = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : int = titles if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [titles]
lowerCAmelCase__ : Dict = texts if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [texts]
lowerCAmelCase__ : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = questions if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.' )
lowerCAmelCase__ : Union[str, Any] = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
lowerCAmelCase__ : str = super().__call__(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
lowerCAmelCase__ : Dict = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
lowerCAmelCase__ : Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase__ : Tuple = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : BatchEncoding , _SCREAMING_SNAKE_CASE : DPRReaderOutput , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : int = 64 , _SCREAMING_SNAKE_CASE : int = 4 , )-> List[DPRSpanPrediction]:
lowerCAmelCase__ : Optional[int] = reader_input['''input_ids''']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reader_output[:3]
lowerCAmelCase__ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = sorted(range(_SCREAMING_SNAKE_CASE ) , reverse=_SCREAMING_SNAKE_CASE , key=relevance_logits.__getitem__ )
lowerCAmelCase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowerCAmelCase__ : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase__ : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase__ : Any = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase__ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_SCREAMING_SNAKE_CASE , top_spans=_SCREAMING_SNAKE_CASE , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_SCREAMING_SNAKE_CASE , start_index=_SCREAMING_SNAKE_CASE , end_index=_SCREAMING_SNAKE_CASE , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , )-> List[DPRSpanPrediction]:
lowerCAmelCase__ : Union[str, Any] = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase__ : List[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] , reverse=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
lowerCAmelCase__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowercase)
class _a ( _lowercase , _lowercase):
_a : List[str] = VOCAB_FILES_NAMES
_a : str = READER_PRETRAINED_VOCAB_FILES_MAP
_a : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
_a : Optional[int] = ['''input_ids''', '''attention_mask''']
| 131
| 0
|
"""simple docstring"""
def __lowerCamelCase ( a_ : int , a_ : int ) -> int:
while b:
__SCREAMING_SNAKE_CASE :Optional[int] = b, a % b
return a
def __lowerCamelCase ( a_ : int , a_ : int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(a_ , a % b )
def __lowerCamelCase ( ) -> List[str]:
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 368
|
"""simple docstring"""
import math
import random
def __lowerCamelCase ( a_ : float , a_ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCamelCase_ = 0.02
def __lowerCamelCase ( a_ : int , a_ : int ) -> float:
__SCREAMING_SNAKE_CASE :Any = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(a_ ):
# Forward propagation
__SCREAMING_SNAKE_CASE :Any = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__SCREAMING_SNAKE_CASE :Tuple = (expected / 1_00) - layer_a
# Error delta
__SCREAMING_SNAKE_CASE :Union[str, Any] = layer_1_error * sigmoid_function(a_ , a_ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = int(input("Expected value: "))
lowerCamelCase_ = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 239
| 0
|
"""simple docstring"""
def __magic_name__ ( __snake_case : list[int] ) -> list[int]:
lowercase : Any = len(__snake_case )
for i in range(__snake_case ):
for j in range(i + 1 , __snake_case ):
if numbers[j] < numbers[i]:
lowercase , lowercase : List[Any] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_A : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
_A : Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 202
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class a__ ( a_, unittest.TestCase ):
__lowerCAmelCase = CpmAntTokenizer
__lowerCAmelCase = False
def __magic_name__ ( self ):
super().setUp()
lowercase : List[str] = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def __magic_name__ ( self ):
lowercase : Tuple = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
lowercase : str = "今天天气真好!"
lowercase : Optional[int] = ["今天", "天气", "真", "好", "!"]
lowercase : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : Any = "今天天气真好!"
lowercase : int = [tokenizer.bos_token] + tokens
lowercase : List[str] = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
lowercase : str = tokenizer.decode(_a )
self.assertEqual(_a , _a )
| 202
| 1
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCamelCase : Any = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCamelCase : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCamelCase : Tuple = dict(zip(vocab, range(len(vocab))))
__UpperCamelCase : Optional[Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Union[str, Any] = Path(tmpdirname)
__UpperCamelCase : int = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCamelCase : str = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCamelCase : List[str] = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCamelCase : Union[str, Any] = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCamelCase : List[Any] = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCamelCase : str = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
__UpperCamelCase : Any = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCamelCase : Any = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 365
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ):
lowerCAmelCase = split_dict._to_yaml_list()
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
| 0
|
from manim import *
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
a_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
a_ : List[Any] = Rectangle(height=0.25 , width=0.25 )
a_ : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a_ : str = [mem.copy() for i in range(6 )]
a_ : Tuple = [mem.copy() for i in range(6 )]
a_ : Any = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : int = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Optional[Any] = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Optional[Any] = Text('CPU' , font_size=2_4 )
a_ : Any = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = [mem.copy() for i in range(4 )]
a_ : List[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Any = Text('GPU' , font_size=2_4 )
a_ : Optional[Any] = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = [mem.copy() for i in range(6 )]
a_ : List[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : List[str] = Text('Model' , font_size=2_4 )
a_ : int = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Dict = []
a_ : str = []
a_ : int = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
rect.set_stroke(SCREAMING_SNAKE_CASE__ )
a_ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 )
self.add(SCREAMING_SNAKE_CASE__ )
model_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
a_ : Tuple = [mem.copy() for i in range(6 )]
a_ : Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Dict = Text('Loaded Checkpoint' , font_size=2_4 )
a_ : str = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Dict = []
a_ : Optional[int] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
a_ : Union[str, Any] = fill.copy().set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 )
target.move_to(SCREAMING_SNAKE_CASE__ )
ckpt_arr.append(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
a_ : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a_ : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(SCREAMING_SNAKE_CASE__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : str = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
a_ : List[Any] = [meta_mem.copy() for i in range(6 )]
a_ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
a_ : int = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Optional[int] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Tuple = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Dict = Text('Disk' , font_size=2_4 )
a_ : Optional[Any] = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) , Write(SCREAMING_SNAKE_CASE__ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE__ , run_time=1 ) )
a_ : List[Any] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
a_ : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(SCREAMING_SNAKE_CASE__ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE__ )
self.play(FadeOut(SCREAMING_SNAKE_CASE__ ) )
a_ : Optional[Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) )
self.play(
FadeOut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ) , )
self.wait()
| 32
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCamelCase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = _distribute_shards(**_lowerCamelCase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = _split_gen_kwargs(_lowerCamelCase , _lowerCamelCase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(_lowerCamelCase ):
_number_of_shards_in_gen_kwargs(_lowerCamelCase )
else:
_lowerCAmelCase : Optional[int] = _number_of_shards_in_gen_kwargs(_lowerCamelCase )
assert out == expected
| 36
| 0
|
import os
import jsonlines
import numpy as np
from tqdm import tqdm
_SCREAMING_SNAKE_CASE = 2_0_4_8
_SCREAMING_SNAKE_CASE = 4_0_9_6
_SCREAMING_SNAKE_CASE = 4_2
_SCREAMING_SNAKE_CASE = os.environ.pop("""PROCESS_TRAIN""", """false""")
_SCREAMING_SNAKE_CASE = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def lowercase( UpperCamelCase_ ) -> Any:
'''simple docstring'''
def choose_first(UpperCamelCase_ , UpperCamelCase_=False ):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) == 1:
UpperCamelCase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
UpperCamelCase = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
UpperCamelCase = {"""id""": example["""id"""]}
UpperCamelCase = example["""annotations"""]
UpperCamelCase = annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
UpperCamelCase = ["""yes"""] if 1 in yes_no_answer else ["""no"""]
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = ["""<cls>"""]
else:
UpperCamelCase = ["""short"""]
UpperCamelCase = choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
UpperCamelCase = ["""long"""]
UpperCamelCase = choose_first(annotation["""long_answer"""] , is_long_answer=_lowerCAmelCase )
UpperCamelCase = []
answer.update(_lowerCAmelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
UpperCamelCase = True
else:
UpperCamelCase = False
UpperCamelCase = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , _lowerCAmelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = _get_single_answer(_lowerCAmelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCamelCase = example["""document"""]["""tokens"""]
UpperCamelCase = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_lowerCAmelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
UpperCamelCase = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
UpperCamelCase = example["""document"""]["""tokens"""]
UpperCamelCase = answer["""start_token"""]
UpperCamelCase = answer["""end_token"""]
UpperCamelCase = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
UpperCamelCase = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
UpperCamelCase = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
UpperCamelCase = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
UpperCamelCase = """ """.join([old[i] for i in range(len(_lowerCAmelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , _lowerCAmelCase , end="""\n""" )
print("""Old:""" , _lowerCAmelCase , end="""\n\n""" )
return {
"context": " ".join(_lowerCAmelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=2048 , UpperCamelCase_=4096 , UpperCamelCase_=True ) -> Any:
'''simple docstring'''
# overlap will be of doc_stride - q_len
UpperCamelCase = get_context_and_ans(_lowerCAmelCase , assertion=_lowerCAmelCase )
UpperCamelCase = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
UpperCamelCase = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
UpperCamelCase = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = input_ids[:q_len]
UpperCamelCase = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride )
for i in doc_start_indices:
UpperCamelCase = i + max_length - q_len
UpperCamelCase = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_lowerCAmelCase ),
"end_token": [-100] * len(_lowerCAmelCase ),
"category": category,
},
}
UpperCamelCase = out["""context"""].split()
UpperCamelCase = splitted_context[answer["""end_token"""]]
UpperCamelCase = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_lowerCAmelCase , ).input_ids )
UpperCamelCase = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_lowerCAmelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
UpperCamelCase = len(tokenizer(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
UpperCamelCase = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
UpperCamelCase = answer["""start_token"""]
UpperCamelCase = answer["""end_token"""]
if assertion:
UpperCamelCase = tokenizer.decode(_lowerCAmelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , _lowerCAmelCase , end="""\n\n""" )
if len(_lowerCAmelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
UpperCamelCase = input_ids[:q_len]
UpperCamelCase = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = [] # null, yes, no, long, short
for i in doc_start_indices:
UpperCamelCase = i + max_length - q_len
UpperCamelCase = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
UpperCamelCase = start_token - i + q_len
UpperCamelCase = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
UpperCamelCase = -100
UpperCamelCase = -100
answers_category.append("""null""" )
UpperCamelCase = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_lowerCAmelCase )
answers_end_token.append(_lowerCAmelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(_lowerCAmelCase ) )
print("""Old:""" , tokenizer.decode(_lowerCAmelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=2048 , UpperCamelCase_=4096 , UpperCamelCase_=False ) -> Dict:
'''simple docstring'''
UpperCamelCase = get_strided_contexts_and_ans(
_lowerCAmelCase , _lowerCAmelCase , doc_stride=_lowerCAmelCase , max_length=_lowerCAmelCase , assertion=_lowerCAmelCase , )
return example
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
with jsonlines.open(_lowerCAmelCase , """a""" ) as writer:
for example in tqdm(_lowerCAmelCase , total=len(_lowerCAmelCase ) , desc="""Saving samples ... """ ):
UpperCamelCase = example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_SCREAMING_SNAKE_CASE = load_dataset("""natural_questions""")
_SCREAMING_SNAKE_CASE = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
_SCREAMING_SNAKE_CASE = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
_SCREAMING_SNAKE_CASE = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
_SCREAMING_SNAKE_CASE = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_SCREAMING_SNAKE_CASE = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
_SCREAMING_SNAKE_CASE = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
| 371
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_SCREAMING_SNAKE_CASE = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
_SCREAMING_SNAKE_CASE = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = """whisper"""
__lowerCAmelCase = ["""past_key_values"""]
__lowerCAmelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] , lowerCamelCase_ : Tuple=5_1865 , lowerCamelCase_ : Dict=80 , lowerCamelCase_ : str=6 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : Optional[int]=4 , lowerCamelCase_ : Optional[int]=1536 , lowerCamelCase_ : int=1536 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : str=5_0257 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : List[str]="gelu" , lowerCamelCase_ : int=256 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Any=0.0_2 , lowerCamelCase_ : str=False , lowerCamelCase_ : List[str]=1500 , lowerCamelCase_ : Dict=448 , lowerCamelCase_ : Tuple=5_0256 , lowerCamelCase_ : Tuple=5_0256 , lowerCamelCase_ : List[Any]=5_0256 , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[int]=[220, 5_0256] , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Dict=256 , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : List[Any]=0.0_5 , lowerCamelCase_ : Dict=10 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Tuple=0.0 , lowerCamelCase_ : str=10 , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : Optional[int]=7 , **lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = num_mel_bins
UpperCamelCase = d_model
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = use_cache
UpperCamelCase = encoder_layers
UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase = max_source_positions
UpperCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
UpperCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
UpperCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , suppress_tokens=lowerCamelCase_ , begin_suppress_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
UpperCamelCase = {0: """batch"""}
else:
UpperCamelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction="""inputs""" )
return common_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional["TensorType"] = None , lowerCamelCase_ : int = 2_2050 , lowerCamelCase_ : float = 5.0 , lowerCamelCase_ : int = 220 , ):
"""simple docstring"""
UpperCamelCase = OrderedDict()
UpperCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase_ , framework=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , time_duration=lowerCamelCase_ , frequency=lowerCamelCase_ , )
UpperCamelCase = encoder_inputs["""input_features"""].shape[2]
UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = encoder_inputs.pop("""input_features""" )
UpperCamelCase = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
UpperCamelCase = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return 1E-3
| 165
| 0
|
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = 0
for ch in input_str:
lowercase = ord(lowerCAmelCase__ )
lowercase = pow(2 , lowerCAmelCase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class A__ ( A__ , A__ ):
@register_to_config
def __init__( self : Dict , _a : int = 768 , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE =nn.Parameter(torch.zeros(1 , _a ) )
_SCREAMING_SNAKE_CASE =nn.Parameter(torch.ones(1 , _a ) )
def A ( self : Tuple , _a : Optional[Union[str, torch.device]] = None , _a : Optional[torch.dtype] = None , ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =nn.Parameter(self.mean.to(_a ).to(_a ) )
_SCREAMING_SNAKE_CASE =nn.Parameter(self.std.to(_a ).to(_a ) )
return self
def A ( self : Tuple , _a : str ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =(embeds - self.mean) * 1.0 / self.std
return embeds
def A ( self : List[str] , _a : Optional[Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =(embeds * self.std) + self.mean
return embeds
| 47
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowerCamelCase ( a_ : str ) -> List[str]:
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(a_ , '''_dynamo''' ):
return False
return isinstance(a_ , torch._dynamo.eval_frame.OptimizedModule )
def __lowerCamelCase ( a_ : int , a_ : bool = True ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :Tuple = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__SCREAMING_SNAKE_CASE :Optional[Any] = is_compiled_module(a_ )
if is_compiled:
__SCREAMING_SNAKE_CASE :Tuple = model
__SCREAMING_SNAKE_CASE :Tuple = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(a_ , a_ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = model.module
if not keep_fpaa_wrapper:
__SCREAMING_SNAKE_CASE :Dict = getattr(a_ , '''forward''' )
__SCREAMING_SNAKE_CASE :Any = model.__dict__.pop('''_original_forward''' , a_ )
if original_forward is not None:
while hasattr(a_ , '''__wrapped__''' ):
__SCREAMING_SNAKE_CASE :List[str] = forward.__wrapped__
if forward == original_forward:
break
__SCREAMING_SNAKE_CASE :Any = forward
if getattr(a_ , '''_converted_to_transformer_engine''' , a_ ):
convert_model(a_ , to_transformer_engine=a_ )
if is_compiled:
__SCREAMING_SNAKE_CASE :Optional[int] = model
__SCREAMING_SNAKE_CASE :Optional[int] = compiled_model
return model
def __lowerCamelCase ( ) -> str:
PartialState().wait_for_everyone()
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[int] ) -> int:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(a_ , a_ )
elif PartialState().local_process_index == 0:
torch.save(a_ , a_ )
@contextmanager
def __lowerCamelCase ( **a_ : int ) -> Tuple:
for key, value in kwargs.items():
__SCREAMING_SNAKE_CASE :Any = str(a_ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowerCamelCase ( a_ : List[str] ) -> List[Any]:
if not hasattr(a_ , '''__qualname__''' ) and not hasattr(a_ , '''__name__''' ):
__SCREAMING_SNAKE_CASE :List[Any] = getattr(a_ , '''__class__''' , a_ )
if hasattr(a_ , '''__qualname__''' ):
return obj.__qualname__
if hasattr(a_ , '''__name__''' ):
return obj.__name__
return str(a_ )
def __lowerCamelCase ( a_ : List[Any] , a_ : List[str] ) -> Tuple:
for key, value in source.items():
if isinstance(a_ , a_ ):
__SCREAMING_SNAKE_CASE :List[str] = destination.setdefault(a_ , {} )
merge_dicts(a_ , a_ )
else:
__SCREAMING_SNAKE_CASE :str = value
return destination
def __lowerCamelCase ( a_ : int = None ) -> bool:
if port is None:
__SCREAMING_SNAKE_CASE :Dict = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 371
|
"""simple docstring"""
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> Union[str, Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __lowerCamelCase ( a_ : Optional[int] , a_ : Any=0 ) -> Optional[Any]:
return sorted(a_ , key=lambda a_ : x[column] )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : str=float('''inf''' ) ) -> str:
for i in range(points_counts - 1 ):
for j in range(i + 1 , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :Optional[Any] = current_dis
return min_dis
def __lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[int]=float('''inf''' ) ) -> Optional[Any]:
for i in range(min(6 , points_counts - 1 ) , a_ ):
for j in range(max(0 , i - 6 ) , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :int = current_dis
return min_dis
def __lowerCamelCase ( a_ : str , a_ : List[Any] , a_ : int ) -> Optional[int]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(a_ , a_ )
# recursion
__SCREAMING_SNAKE_CASE :int = points_counts // 2
__SCREAMING_SNAKE_CASE :Dict = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[:mid] , a_ )
__SCREAMING_SNAKE_CASE :Any = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[mid:] , points_counts - mid )
__SCREAMING_SNAKE_CASE :Union[str, Any] = min(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(a_ )
__SCREAMING_SNAKE_CASE :Dict = dis_between_closest_in_strip(
a_ , len(a_ ) , a_ )
return min(a_ , a_ )
def __lowerCamelCase ( a_ : int , a_ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = column_based_sort(a_ , column=0 )
__SCREAMING_SNAKE_CASE :int = column_based_sort(a_ , column=1 )
return (
closest_pair_of_points_sqr(
a_ , a_ , a_ )
) ** 0.5
if __name__ == "__main__":
lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 239
| 0
|
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowercase , __lowercase ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
__UpperCAmelCase = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowercase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=False ) -> Optional[Any]:
A: str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
for i in range(config.num_hidden_layers ):
A: Tuple = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A: List[str] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
A: Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A: Dict = in_proj_weight[
: config.hidden_size, :
]
A: int = in_proj_bias[: config.hidden_size]
A: Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A: int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A: Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
A: Optional[Any] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
A: Optional[int] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int:
A: List[Any] = dct.pop(__lowercase )
A: int = val
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str:
A: Optional[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase )
A: Tuple = False
A: str = False
A: List[Any] = False
A: Optional[int] = False
if "vqa" in checkpoint_url:
A: Union[str, Any] = True
A: Union[str, Any] = 3_1_2_9
A: List[Any] = '''huggingface/label-files'''
A: Any = '''vqa2-id2label.json'''
A: Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
A: Any = idalabel
A: Optional[Any] = {v: k for k, v in idalabel.items()}
A: List[str] = ViltForQuestionAnswering(__lowercase )
elif "nlvr" in checkpoint_url:
A: Dict = True
A: str = 2
A: Union[str, Any] = {0: '''False''', 1: '''True'''}
A: Any = {v: k for k, v in config.idalabel.items()}
A: Optional[Any] = 3
A: Any = ViltForImagesAndTextClassification(__lowercase )
elif "irtr" in checkpoint_url:
A: Tuple = True
A: Optional[Any] = ViltForImageAndTextRetrieval(__lowercase )
elif "mlm_itm" in checkpoint_url:
A: Tuple = True
A: Optional[int] = ViltForMaskedLM(__lowercase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
A: int = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict''']
A: List[str] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase )
if mlm_model or irtr_model:
A: str = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
A , A: Union[str, Any] = model.load_state_dict(__lowercase , strict=__lowercase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowercase )
# Define processor
A: Optional[Any] = ViltImageProcessor(size=3_8_4 )
A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A: Optional[int] = ViltProcessor(__lowercase , __lowercase )
# Forward pass on example inputs (image + text)
if nlvr_model:
A: str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
A: List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
A: Any = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
A: Any = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw )
if mlm_model:
A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].'''
else:
A: Optional[int] = '''How many cats are there?'''
A: Union[str, Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: Any = model(**__lowercase )
# Verify outputs
if mlm_model:
A: Any = torch.Size([1, 1_1, 3_0_5_2_2] )
A: Tuple = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )
# verify masked token prediction equals "cats"
A: List[str] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
A: Any = torch.Size([1, 3_1_2_9] )
A: Optional[int] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )
# verify vqa prediction equals "2"
A: Dict = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
A: Union[str, Any] = torch.Size([1, 2] )
A: Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 319
| 0
|
'''simple docstring'''
from math import factorial, pi
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : int = 30 ) -> float:
if not isinstance(UpperCAmelCase__ , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
lowercase_ : Optional[int] = float(UpperCAmelCase__ )
lowercase_ : int = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(UpperCAmelCase__ ) )
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : int = 30 ) -> float:
if not isinstance(UpperCAmelCase__ , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
lowercase_ : List[str] = float(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(UpperCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 21
|
'''simple docstring'''
import os
import numpy
import onnx
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Tuple = a.name
lowercase_ : Tuple = b.name
lowercase_ : Any = """"""
lowercase_ : List[Any] = """"""
lowercase_ : List[Any] = a == b
lowercase_ : Union[str, Any] = name_a
lowercase_ : Optional[Any] = name_b
return res
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowercase_ : int = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : Optional[Any] = inits[i].name
lowercase_ : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : int ) -> List[str]:
lowercase_ : Dict = os.path.dirname(UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.basename(UpperCAmelCase__ )
lowercase_ : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase_ : List[Any] = list(model.graph.initializer )
lowercase_ : int = set()
lowercase_ : int = {}
lowercase_ : str = []
lowercase_ : int = 0
for i in range(len(UpperCAmelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase__ )
dup_set.add(UpperCAmelCase__ )
lowercase_ : Dict = inits[j].data_type
lowercase_ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCAmelCase__ )
total_reduced_size += mem_size
lowercase_ : int = inits[i].name
lowercase_ : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowercase_ : Tuple = sorted(UpperCAmelCase__ )
_remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = """optimized_""" + model_file_name
lowercase_ : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
onnx.save(UpperCAmelCase__ , UpperCAmelCase__ )
return new_model
| 21
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ = 50 ):
__SCREAMING_SNAKE_CASE = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100
|
import os
from distutils.util import strtobool
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
for e in env_keys:
SCREAMING_SNAKE_CASE = int(os.environ.get(_SCREAMING_SNAKE_CASE , -1 ) )
if val >= 0:
return val
return default
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
return strtobool(_SCREAMING_SNAKE_CASE ) == 1 # As its name indicates `strtobool` actually returns an int...
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="no" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
return value
| 296
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCAmelCase__ ( a__ = True , *a__ , **a__ ) ->Dict:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
_UpperCamelCase = False
if main_process_only:
_UpperCamelCase = PartialState().local_process_index == 0
return _tqdm(*a__ , **a__ , disable=a__ )
| 358
|
def lowerCAmelCase__ ( a__ , a__ ) ->str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_UpperCamelCase = str(bin(a__ ) )[2:] # remove the leading "0b"
_UpperCamelCase = str(bin(a__ ) )[2:] # remove the leading "0b"
_UpperCamelCase = max(len(a__ ) , len(a__ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(a__ ) , b_binary.zfill(a__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : int ) -> str:
'''simple docstring'''
UpperCAmelCase_ = WavaVecaForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ )
UpperCAmelCase_ = downstream_dict["projector.weight"]
UpperCAmelCase_ = downstream_dict["projector.bias"]
UpperCAmelCase_ = downstream_dict["model.post_net.linear.weight"]
UpperCAmelCase_ = downstream_dict["model.post_net.linear.bias"]
return model
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = WavaVecaForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
UpperCAmelCase_ = downstream_dict["model.linear.weight"]
UpperCAmelCase_ = downstream_dict["model.linear.bias"]
return model
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : List[str] , snake_case_ : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = WavaVecaForXVector.from_pretrained(snake_case_ , config=snake_case_ )
UpperCAmelCase_ = downstream_dict["connector.weight"]
UpperCAmelCase_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCAmelCase_ = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
UpperCAmelCase_ = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
UpperCAmelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
UpperCAmelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
UpperCAmelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
UpperCAmelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
UpperCAmelCase_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )
UpperCAmelCase_ = checkpoint["Downstream"]
UpperCAmelCase_ = WavaVecaConfig.from_pretrained(snake_case_ )
UpperCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
UpperCAmelCase_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
UpperCAmelCase_ = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith("ForAudioFrameClassification" ):
UpperCAmelCase_ = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith("ForXVector" ):
UpperCAmelCase_ = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
UpperCAmelCase_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__a = '''facebook/wmt19-en-de'''
__a = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__a = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__a = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
__a = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__a = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
__a = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 362
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a = 16
__a = 32
def __lowercase ( _UpperCamelCase, _UpperCamelCase = 16 ) ->List[Any]:
"""simple docstring"""
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase : List[Any] = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase : List[Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=_UpperCamelCase, max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase : Union[str, Any] = datasets.map(
_UpperCamelCase, batched=_UpperCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase : Union[str, Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase : Tuple = 16
elif accelerator.mixed_precision != "no":
lowercase : str = 8
else:
lowercase : List[str] = None
return tokenizer.pad(
_UpperCamelCase, padding='''longest''', max_length=_UpperCamelCase, pad_to_multiple_of=_UpperCamelCase, return_tensors='''pt''', )
# Instantiate dataloaders.
lowercase : int = DataLoader(
tokenized_datasets['''train'''], shuffle=_UpperCamelCase, collate_fn=_UpperCamelCase, batch_size=_UpperCamelCase )
lowercase : str = DataLoader(
tokenized_datasets['''validation'''], shuffle=_UpperCamelCase, collate_fn=_UpperCamelCase, batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a = mocked_dataloaders # noqa: F811
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', _UpperCamelCase ) == "1":
lowercase : Tuple = 2
# New Code #
lowercase : Optional[int] = int(args.gradient_accumulation_steps )
lowercase : Optional[int] = int(args.local_sgd_steps )
# Initialize accelerator
lowercase : Tuple = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=_UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Dict = config['''lr''']
lowercase : List[str] = int(config['''num_epochs'''] )
lowercase : str = int(config['''seed'''] )
lowercase : str = int(config['''batch_size'''] )
lowercase : Any = evaluate.load('''glue''', '''mrpc''' )
set_seed(_UpperCamelCase )
lowercase , lowercase : Dict = get_dataloaders(_UpperCamelCase, _UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : int = model.to(accelerator.device )
# Instantiate optimizer
lowercase : Any = AdamW(params=model.parameters(), lr=_UpperCamelCase )
# Instantiate scheduler
lowercase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase, num_warmup_steps=100, num_training_steps=(len(_UpperCamelCase ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] = accelerator.prepare(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=_UpperCamelCase, model=_UpperCamelCase, local_sgd_steps=_UpperCamelCase, enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCamelCase ):
lowercase : int = model(**_UpperCamelCase )
lowercase : Optional[int] = output.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : Optional[int] = model(**_UpperCamelCase )
lowercase : Optional[Any] = outputs.logits.argmax(dim=-1 )
lowercase , lowercase : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCamelCase, references=_UpperCamelCase, )
lowercase : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", _UpperCamelCase )
def __lowercase ( ) ->int:
"""simple docstring"""
lowercase : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=_UpperCamelCase, default=_UpperCamelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''', type=_UpperCamelCase, default=1, help='''The number of minibatches to be ran before gradients are accumulated.''', )
parser.add_argument(
'''--local_sgd_steps''', type=_UpperCamelCase, default=8, help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
lowercase : List[Any] = parser.parse_args()
lowercase : List[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCamelCase, _UpperCamelCase )
if __name__ == "__main__":
main()
| 173
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ConditionalDetrFeatureExtractor"]
__a = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = CustomTokenizer
pass
| 35
| 1
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class __A :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=64 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> List[str]:
a =parent
a =batch_size
a =seq_length
a =is_training
a =use_input_mask
a =use_token_type_ids
a =use_labels
a =vocab_size
a =hidden_size
a =embedding_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =max_position_embeddings
a =type_vocab_size
a =type_sequence_label_size
a =initializer_range
a =num_labels
a =num_choices
a =scope
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a =None
if self.use_input_mask:
a =random_attention_mask([self.batch_size, self.seq_length] )
a =None
if self.use_token_type_ids:
a =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a =None
a =None
a =None
if self.use_labels:
a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a =ids_tensor([self.batch_size] , self.num_choices )
a =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Dict:
a =MobileBertModel(config=__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A )
a =model(__A , token_type_ids=__A )
a =model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
a =MobileBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
a =MobileBertForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
a =MobileBertForPreTraining(config=__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
a =MobileBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
a =model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
a =self.num_labels
a =MobileBertForSequenceClassification(__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> int:
a =self.num_labels
a =MobileBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
a =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
a =self.num_choices
a =MobileBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) =config_and_inputs
a ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=False ) -> List[str]:
a =super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
a =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =MobileBertModelTester(self )
a =ConfigTester(self , config_class=__A , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__A )
def _A ( lowercase ):
"""simple docstring"""
return torch.tensor(
lowercase , dtype=torch.long , device=lowercase , )
lowerCamelCase_ : Optional[int] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(__A )
a =_long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
a =model(__A )[0]
a =torch.Size((1, 9, 512) )
self.assertEqual(output.shape , __A )
a =torch.tensor(
[
[
[-2.4_7_3_6_5_2_6E0_7, 8.2_6_9_1_6_5_6E0_4, 1.6_5_2_1_8_3_8E0_5],
[-5.7_5_4_1_7_0_4E-0_1, 3.9_0_5_6_0_2_2E0_0, 4.4_0_1_1_5_0_7E0_0],
[2.6_0_4_7_3_5_9E0_0, 1.5_6_7_7_6_5_2E0_0, -1.7_3_2_4_1_8_8E-0_1],
]
] , device=__A , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
a =torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
a =torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : int = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215
| 0
|
import baseaa
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
return baseaa.aaaencode(string.encode("utf-8" ) )
def _snake_case ( lowerCAmelCase : bytes ):
"""simple docstring"""
return baseaa.aaadecode(lowerCAmelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
| 18
| 1
|
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("multiplicative_persistence() only accepts integral values" )
if num < 0:
raise ValueError("multiplicative_persistence() does not accept negative values" )
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : Dict = str(__UpperCamelCase )
while len(__UpperCamelCase ) != 1:
lowerCAmelCase_ : str = [int(__UpperCamelCase ) for i in num_string]
lowerCAmelCase_ : Optional[Any] = 1
for i in range(0 , len(__UpperCamelCase ) ):
total *= numbers[i]
lowerCAmelCase_ : Optional[Any] = str(__UpperCamelCase )
steps += 1
return steps
def __lowerCamelCase ( __UpperCamelCase ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("additive_persistence() only accepts integral values" )
if num < 0:
raise ValueError("additive_persistence() does not accept negative values" )
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : Optional[int] = str(__UpperCamelCase )
while len(__UpperCamelCase ) != 1:
lowerCAmelCase_ : List[Any] = [int(__UpperCamelCase ) for i in num_string]
lowerCAmelCase_ : Any = 0
for i in range(0 , len(__UpperCamelCase ) ):
total += numbers[i]
lowerCAmelCase_ : str = str(__UpperCamelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
import argparse
import os
import re
lowercase__ = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowercase__ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ = re.compile(r"""\[([^\]]+)\]""")
def __lowerCamelCase ( __UpperCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = _re_indent.search(__UpperCamelCase )
return "" if search is None else search.groups()[0]
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase=None , __UpperCamelCase=None ) -> str:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Dict = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__UpperCamelCase ):
index += 1
lowerCAmelCase_ : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase_ : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase_ : Optional[Any] = [lines[index]]
index += 1
while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__UpperCamelCase ) )
if index < len(__UpperCamelCase ) - 1:
lowerCAmelCase_ : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase_ : Any = []
else:
blocks.append("\n".join(__UpperCamelCase ) )
lowerCAmelCase_ : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCamelCase ) > 0:
blocks.append("\n".join(__UpperCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCamelCase ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def __lowerCamelCase ( __UpperCamelCase ) -> Any:
"""simple docstring"""
def _inner(__UpperCamelCase ):
return key(__UpperCamelCase ).lower().replace("_" , "" )
return _inner
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=None ) -> List[str]:
"""simple docstring"""
def noop(__UpperCamelCase ):
return x
if key is None:
lowerCAmelCase_ : Optional[int] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase_ : str = [obj for obj in objects if key(__UpperCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase_ : str = [obj for obj in objects if key(__UpperCamelCase )[0].isupper() and not key(__UpperCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase_ : int = [obj for obj in objects if not key(__UpperCamelCase )[0].isupper()]
lowerCAmelCase_ : Dict = ignore_underscore(__UpperCamelCase )
return sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase )
def __lowerCamelCase ( __UpperCamelCase ) -> List[str]:
"""simple docstring"""
def _replace(__UpperCamelCase ):
lowerCAmelCase_ : Tuple = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
lowerCAmelCase_ : Optional[int] = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Optional[int] = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__UpperCamelCase )] ) + "]"
lowerCAmelCase_ : Union[str, Any] = import_statement.split("\n" )
if len(__UpperCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase_ : Optional[int] = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase_ : Optional[Any] = [(i, _re_strip_line.search(__UpperCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase_ : List[Any] = sort_objects(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )
lowerCAmelCase_ : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase_ : Dict = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase_ : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Any = keys[:-1]
lowerCAmelCase_ : Dict = get_indent(lines[1] ) + ", ".join([f'''"{k}"''' for k in sort_objects(__UpperCamelCase )] )
return "\n".join(__UpperCamelCase )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase_ : List[str] = _re_bracket_content.sub(_replace , __UpperCamelCase )
return import_statement
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=True ) -> Optional[int]:
"""simple docstring"""
with open(__UpperCamelCase , encoding="utf-8" ) as f:
lowerCAmelCase_ : List[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase_ : int = split_code_in_indented_blocks(
__UpperCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__UpperCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase_ : Optional[int] = main_blocks[block_idx]
lowerCAmelCase_ : Union[str, Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase_ : str = 0
while line_idx < len(__UpperCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase_ : Optional[int] = len(__UpperCamelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase_ : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase_ : Union[str, Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCAmelCase_ : Tuple = split_code_in_indented_blocks(__UpperCamelCase , indent_level=__UpperCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase_ : List[Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase_ : Dict = [(pattern.search(__UpperCamelCase ).groups()[0] if pattern.search(__UpperCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase_ : Any = [(i, key) for i, key in enumerate(__UpperCamelCase ) if key is not None]
lowerCAmelCase_ : Union[str, Any] = [x[0] for x in sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : str = []
for i in range(len(__UpperCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase_ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__UpperCamelCase )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase_ : Any = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCamelCase ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(__UpperCamelCase ) )
def __lowerCamelCase ( __UpperCamelCase=True ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Any = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
lowerCAmelCase_ : Dict = sort_imports(os.path.join(__UpperCamelCase , "__init__.py" ) , check_only=__UpperCamelCase )
if result:
lowerCAmelCase_ : Union[str, Any] = [os.path.join(__UpperCamelCase , "__init__.py" )]
if len(__UpperCamelCase ) > 0:
raise ValueError(f'''Would overwrite {len(__UpperCamelCase )} files, run `make style`.''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowercase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 161
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
snake_case_ = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
snake_case_ = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , 'r' , encoding='utf-8' ) as f:
lowercase__ : Tuple = json.loads(f.read() )
lowercase__ : Dict = collections.OrderedDict()
lowercase__ : List[str] = collections.OrderedDict()
lowercase__ : List[str] = collections.OrderedDict()
with open(SCREAMING_SNAKE_CASE_ , 'r' , encoding='utf-8' ) as f:
lowercase__ : Optional[Any] = f.readlines()
lowercase__ : List[Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(SCREAMING_SNAKE_CASE_ ):
lowercase__ : Dict = b
lowercase__ : str = idx
for wd in b:
lowercase__ : Optional[int] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Tuple = VOCAB_FILES_NAMES
__lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , a , a , a="<|endoftext|>" , a="<|endoftext|>" , a="<|startoftext|>" , a="<|endoftext|>" , a=False , **a , ):
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
lowercase__ : str = do_clean_text
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = load_vocab_and_emoji(a , a)
lowercase__ : Dict = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def snake_case_ ( self):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab)
def snake_case_ ( self):
return dict(self.raw_vocab , **self.added_tokens_encoder)
def snake_case_ ( self , a):
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def snake_case_ ( self , a):
return self.vocab.get(a , self.vocab.get(self.unk_token))
def snake_case_ ( self , a):
return self.subword_tokenizer.convert_id_to_token(a)
def snake_case_ ( self , a):
lowercase__ : int = ''.join(a).strip()
return out_string
def snake_case_ ( self , a):
lowercase__ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
lowercase__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
def snake_case_ ( self , a , a = None):
lowercase__ : Optional[int] = 0
if os.path.isdir(a):
lowercase__ : str = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ : Optional[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
else:
lowercase__ : Union[str, Any] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowercase__ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(a , 'w' , encoding='utf-8') as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!')
lowercase__ : Union[str, Any] = token_index
writer.write(','.join(a) + '\n')
index += 1
with open(a , 'w' , encoding='utf-8') as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ (__snake_case ):
def __init__( self , a , a , a):
lowercase__ : Union[str, Any] = vocab # same as swe
lowercase__ : str = ids_to_tokens # same as bpe
lowercase__ : int = emoji
lowercase__ : Any = np.max([len(a) for w in self.vocab.keys()])
lowercase__ : int = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
lowercase__ : Tuple = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
lowercase__ : str = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
lowercase__ : Dict = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
lowercase__ : Dict = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
lowercase__ : Union[str, Any] = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
lowercase__ : Optional[Any] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowercase__ : List[Any] = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowercase__ : Dict = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
def __len__( self):
return len(self.ids_to_tokens)
def snake_case_ ( self , a):
lowercase__ : Optional[Any] = self.content_repattera.sub('<URL>' , a)
lowercase__ : List[str] = self.content_repattera.sub('<EMAIL>' , a)
lowercase__ : List[Any] = self.content_repattera.sub('<TEL>' , a)
lowercase__ : Optional[Any] = self.content_repattera.sub('<DATE>' , a)
lowercase__ : Optional[int] = self.content_repattera.sub('<DATE>' , a)
lowercase__ : Dict = self.content_repattera.sub('<PRICE>' , a)
lowercase__ : List[str] = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
lowercase__ : Optional[Any] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>')
return content
def snake_case_ ( self , a , a=False):
lowercase__ : str = text.replace(' ' , '<SP>')
lowercase__ : Any = text.replace(' ' , '<SP>')
lowercase__ : List[str] = text.replace('\r\n' , '<BR>')
lowercase__ : List[Any] = text.replace('\n' , '<BR>')
lowercase__ : List[str] = text.replace('\r' , '<BR>')
lowercase__ : int = text.replace('\t' , '<TAB>')
lowercase__ : List[Any] = text.replace('—' , 'ー')
lowercase__ : Union[str, Any] = text.replace('−' , 'ー')
for k, v in self.emoji["emoji"].items():
if k in text:
lowercase__ : Optional[Any] = text.replace(a , a)
if clean:
lowercase__ : Tuple = self.clean_text(a)
def check_simbol(a):
lowercase__ : Dict = x.encode()
if len(a) == 1 and len(a) == 2:
lowercase__ : Union[str, Any] = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0xC_2A1 and c <= 0xC_2BF)
or (c >= 0xC_780 and c <= 0xC_783)
or (c >= 0xC_AB9 and c <= 0xC_BBF)
or (c >= 0xC_C80 and c <= 0xC_DA2)
):
return True
return False
def checkuae(a):
lowercase__ : Dict = x.encode()
if len(a) == 1 and len(a) == 3:
lowercase__ : str = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0xE28_080 and c <= 0xE2B_07F:
return True
return False
lowercase__ : Optional[Any] = 0
lowercase__ : Union[str, Any] = []
while pos < len(a):
lowercase__ : str = min(len(a) , pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
lowercase__ : Any = [] # (token_id, token, pos)
for e in range(a , a , -1):
lowercase__ : Optional[Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
lowercase__ : Dict = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
lowercase__ , lowercase__ , lowercase__ : str = sorted(a , key=lambda a: x[0])[0]
result.append(a)
lowercase__ : str = e
else:
lowercase__ : str = pos + 1
lowercase__ : Union[str, Any] = text[pos:end]
if check_simbol(a):
result.append('<KIGOU>')
elif checkuae(a):
result.append('<U2000U2BFF>')
else:
for i in wd.encode('utf-8'):
result.append('<|byte%d|>' % i)
lowercase__ : str = end
return result
def snake_case_ ( self , a , a="\n"):
lowercase__ : Union[str, Any] = []
lowercase__ : List[Any] = []
lowercase__ : Any = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode('utf-8' , errors='replace'))
lowercase__ : Optional[int] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word])
elif word == "<SP>":
words.append(' ')
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append('\t')
elif word == "<BLOCK>":
words.append('▀')
elif word == "<KIGOU>":
words.append('ǀ')
elif word == "<U2000U2BFF>":
words.append('‖')
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode('utf-8' , errors='replace'))
lowercase__ : str = ''.join(a)
return text
| 214
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
snake_case_ = logging.getLogger(__name__)
snake_case_ = 50 # max width of layer names
snake_case_ = 70 # max width of quantizer names
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
lowercase__ : str = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=SCREAMING_SNAKE_CASE_ , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=SCREAMING_SNAKE_CASE_ , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=SCREAMING_SNAKE_CASE_ , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=SCREAMING_SNAKE_CASE_ , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=SCREAMING_SNAKE_CASE_ , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=SCREAMING_SNAKE_CASE_ , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
if args.calibrator == "max":
lowercase__ : Optional[int] = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
lowercase__ : Optional[Any] = 'histogram'
elif args.calibrator == "mse":
lowercase__ : int = 'histogram'
else:
raise ValueError(f"""Invalid calibrator {args.calibrator}""" )
lowercase__ : Any = QuantDescriptor(num_bits=args.aprec , calib_method=SCREAMING_SNAKE_CASE_ )
lowercase__ : List[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(SCREAMING_SNAKE_CASE_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Any=False ):
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(SCREAMING_SNAKE_CASE_ , ['embeddings'] , which='weight' , _disabled=SCREAMING_SNAKE_CASE_ )
if args.quant_disable:
set_quantizer_by_name(SCREAMING_SNAKE_CASE_ , [''] , _disabled=SCREAMING_SNAKE_CASE_ )
if args.quant_disable_keyword:
set_quantizer_by_name(SCREAMING_SNAKE_CASE_ , args.quant_disable_keyword , _disabled=SCREAMING_SNAKE_CASE_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE_ , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=SCREAMING_SNAKE_CASE_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE_ , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=SCREAMING_SNAKE_CASE_ )
if args.recalibrate_weights:
recalibrate_weights(SCREAMING_SNAKE_CASE_ )
if args.fuse_qkv:
fuse_qkv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if args.clip_gelu:
clip_gelu(SCREAMING_SNAKE_CASE_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"""{name:80}: {module}""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
def fusea(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ):
for mod in [qq, qk, qv]:
if not hasattr(SCREAMING_SNAKE_CASE_ , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
lowercase__ : List[Any] = qq._amax.detach().item()
lowercase__ : Optional[int] = qk._amax.detach().item()
lowercase__ : List[str] = qv._amax.detach().item()
lowercase__ : Tuple = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
qq._amax.fill_(SCREAMING_SNAKE_CASE_ )
qk._amax.fill_(SCREAMING_SNAKE_CASE_ )
qv._amax.fill_(SCREAMING_SNAKE_CASE_ )
logger.info(f""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
lowercase__ : Any = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE_ , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
lowercase__ : Union[str, Any] = mod.weight.shape[0]
lowercase__ : str = mod._weight_quantizer._amax.detach()
lowercase__ : Any = torch.ones(SCREAMING_SNAKE_CASE_ , dtype=amax.dtype , device=amax.device ) * amax
print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE_ , '_weight_quantizer' ):
if not hasattr(mod.weight_quantizer , '_amax' ):
print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowercase__ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowercase__ : Union[str, Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
lowercase__ : str = pytorch_quantization.utils.reduce_amax(mod.weight , axis=SCREAMING_SNAKE_CASE_ , keepdims=SCREAMING_SNAKE_CASE_ ).detach()
logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
lowercase__ : Union[str, Any] = amax
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]=25 , SCREAMING_SNAKE_CASE_ : Any=180 , SCREAMING_SNAKE_CASE_ : Optional[int]=None ):
'''simple docstring'''
if ignore is None:
lowercase__ : Tuple = []
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ : str = [ignore]
lowercase__ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(SCREAMING_SNAKE_CASE_ , 'weight' ):
continue
lowercase__ : Optional[Any] = max(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
for name, mod in model.named_modules():
lowercase__ : Dict = getattr(SCREAMING_SNAKE_CASE_ , '_input_quantizer' , SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE_ , '_weight_quantizer' , SCREAMING_SNAKE_CASE_ )
if not hasattr(SCREAMING_SNAKE_CASE_ , 'weight' ):
continue
if type(SCREAMING_SNAKE_CASE_ ) in ignore:
continue
if [True for s in ignore if type(SCREAMING_SNAKE_CASE_ ) is str and s in name]:
continue
lowercase__ : Optional[int] = f"""Act:{input_q.extra_repr()}"""
lowercase__ : Dict = f"""Wgt:{weight_q.extra_repr()}"""
lowercase__ : Tuple = f"""{name:{name_width}} {act_str} {wgt_str}"""
if len(SCREAMING_SNAKE_CASE_ ) <= line_width:
logger.info(SCREAMING_SNAKE_CASE_ )
else:
logger.info(f"""{name:{name_width}} {act_str}""" )
logger.info(f"""{" ":{name_width}} {wgt_str}""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
lowercase__ : List[str] = 0
for name, mod in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , pytorch_quantization.nn.TensorQuantizer ):
print(f"""{name:80} {mod}""" )
count += 1
print(f"""{count} TensorQuantizers found in model""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
lowercase__ : Dict = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if quantizer_mod is not None:
assert hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
logger.warning(f"""{name} has no {quantizer}""" )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]="both" , **SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
lowercase__ : Optional[int] = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '_input_quantizer' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if which in ["weight", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '_weight_quantizer' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE_ , '_input_quantizer' ) or hasattr(SCREAMING_SNAKE_CASE_ , '_weight_quantizer' ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
set_quantizers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ : Any = f"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(SCREAMING_SNAKE_CASE_ )
| 214
| 1
|
from __future__ import annotations
_UpperCamelCase = "#"
class lowercase :
'''simple docstring'''
def __init__(self ) -> int:
"""simple docstring"""
UpperCAmelCase__ = {}
def UpperCamelCase__ (self , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self._trie
for char in text:
if char not in trie:
UpperCAmelCase__ = {}
UpperCAmelCase__ = trie[char]
UpperCAmelCase__ = True
def UpperCamelCase__ (self , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self._trie
for char in prefix:
if char in trie:
UpperCAmelCase__ = trie[char]
else:
return []
return self._elements(a_ )
def UpperCamelCase__ (self , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = []
for c, v in d.items():
UpperCAmelCase__ = [' '] if c == END else [(c + s) for s in self._elements(a_ )]
result.extend(a_ )
return tuple(a_ )
_UpperCamelCase = Trie()
_UpperCamelCase = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def UpperCamelCase_( snake_case__: str ) -> tuple:
UpperCAmelCase__ = trie.find_word(_UpperCamelCase )
return tuple(string + word for word in suffixes )
def UpperCamelCase_( ) -> None:
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 359
|
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335
| 0
|
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_A : List[str] = '''▁'''
_A : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class a__ ( UpperCamelCase_, unittest.TestCase ):
__lowerCAmelCase = BertGenerationTokenizer
__lowerCAmelCase = False
__lowerCAmelCase = True
def __magic_name__ ( self ):
super().setUp()
lowercase : Any = BertGenerationTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self ):
lowercase : Optional[int] = '''<s>'''
lowercase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __magic_name__ ( self ):
lowercase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(A_ ) , 1_002 )
def __magic_name__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __magic_name__ ( self ):
lowercase : int = BertGenerationTokenizer(A_ , keep_accents=A_ )
lowercase : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(A_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [285, 46, 10, 170, 382] , )
lowercase : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : int = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __magic_name__ ( self ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def __magic_name__ ( self ):
lowercase : Tuple = '''Hello World!'''
lowercase : Any = [18_536, 2_260, 101]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def __magic_name__ ( self ):
lowercase : str = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowercase : List[Any] = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@require_torch
@slow
def __magic_name__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowercase : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : List[str] = ''' '''.join(A_ )
lowercase : Dict = self.big_tokenizer.encode_plus(A_ , return_tensors="pt" , return_token_type_ids=A_ )
lowercase : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=A_ )
lowercase : Tuple = BertGenerationConfig()
lowercase : str = BertGenerationEncoder(A_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A_ )
model(**A_ )
@slow
def __magic_name__ ( self ):
# fmt: off
lowercase : str = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 202
|
A__ : Any = '''Tobias Carryer'''
from time import time
class __snake_case :
def __init__( self : Any , A_ : Tuple , A_ : Dict , A_ : Tuple , A_ : str=int(time())): # noqa: B008
lowerCAmelCase_ : int = multiplier
lowerCAmelCase_ : int = increment
lowerCAmelCase_ : str = modulo
lowerCAmelCase_ : str = seed
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
A__ : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 103
| 0
|
"""simple docstring"""
def lowercase ( A_ )-> bool:
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 226
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__lowercase = True
except ImportError:
__lowercase = False
try:
from torch.hub import _get_torch_home
__lowercase = _get_torch_home()
except ImportError:
__lowercase = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
__lowercase = os.path.join(torch_cache_home, """transformers""")
__lowercase = """https://cdn.huggingface.co"""
__lowercase = """https://s3.amazonaws.com/models.huggingface.co/bert"""
__lowercase = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
__lowercase = os.path.join(PATH, """config.yaml""")
__lowercase = os.path.join(PATH, """attributes.txt""")
__lowercase = os.path.join(PATH, """objects.txt""")
__lowercase = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
__lowercase = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
__lowercase = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
__lowercase = """pytorch_model.bin"""
__lowercase = """config.yaml"""
def lowercase ( A_=OBJECTS , A_=ATTRIBUTES )-> Union[str, Any]:
'''simple docstring'''
a : Optional[Any] = []
with open(A_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
a : Union[str, Any] = []
with open(A_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase ( A_ )-> Optional[Any]:
'''simple docstring'''
a : Dict = OrderedDict()
with open(A_ , "rb" ) as f:
a : Optional[Any] = pkl.load(A_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
a : Dict = ckp.pop(A_ )
if isinstance(A_ , np.ndarray ):
a : Optional[Any] = torch.tensor(A_ )
else:
assert isinstance(A_ , torch.tensor ), type(A_ )
a : int = v
return r
class _A :
"""simple docstring"""
UpperCAmelCase : int = {}
def __init__( self : Any , __UpperCAmelCase : dict , __UpperCAmelCase : str = "root" , __UpperCAmelCase : Optional[int]=0):
a : List[str] = name
a : Tuple = level
a : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
a : List[Any] = copy.deepcopy(__UpperCAmelCase)
a : int = copy.deepcopy(__UpperCAmelCase)
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Union[str, Any] = Config(__UpperCAmelCase , name=__UpperCAmelCase , level=level + 1)
a : Dict = v
setattr(self , __UpperCAmelCase , __UpperCAmelCase)
a : Tuple = d
def __repr__( self : List[str]):
return str(list((self._pointer.keys())))
def __setattr__( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Tuple):
a : Optional[Any] = val
a : Tuple = val
a : Dict = key.split(".")
a : Union[str, Any] = len(__UpperCAmelCase) - 1
a : Optional[int] = self._pointer
if len(__UpperCAmelCase) > 1:
for i, l in enumerate(__UpperCAmelCase):
if hasattr(self , __UpperCAmelCase) and isinstance(getattr(self , __UpperCAmelCase) , __UpperCAmelCase):
setattr(getattr(self , __UpperCAmelCase) , ".".join(levels[i:]) , __UpperCAmelCase)
if l == last_level:
a : int = val
else:
a : str = pointer[l]
def __snake_case ( self : str):
return self._pointer
def __snake_case ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any]):
with open(f'''{file_name}''' , "w") as stream:
dump(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : int):
with open(f'''{file_name}''' , "w") as stream:
json.dump(__UpperCAmelCase , __UpperCAmelCase)
@staticmethod
def __snake_case ( __UpperCAmelCase : Dict):
with open(__UpperCAmelCase) as stream:
a : List[str] = load(__UpperCAmelCase , Loader=__UpperCAmelCase)
return data
def __str__( self : Tuple):
a : str = " "
if self._name != "root":
a : List[str] = f'''{t * (self._level-1)}{self._name}:\n'''
else:
a : Optional[Any] = ""
a : List[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(__UpperCAmelCase).__name__})\n'''
a : Tuple = level
return r[:-1]
@classmethod
def __snake_case ( cls : str , __UpperCAmelCase : str , **__UpperCAmelCase : List[Any]):
a , a : Tuple = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
return cls(__UpperCAmelCase)
@classmethod
def __snake_case ( cls : Union[str, Any] , __UpperCAmelCase : str , **__UpperCAmelCase : List[str]):
a : int = kwargs.pop("cache_dir" , __UpperCAmelCase)
a : List[Any] = kwargs.pop("force_download" , __UpperCAmelCase)
a : Optional[int] = kwargs.pop("resume_download" , __UpperCAmelCase)
a : Tuple = kwargs.pop("proxies" , __UpperCAmelCase)
a : int = kwargs.pop("local_files_only" , __UpperCAmelCase)
if os.path.isdir(__UpperCAmelCase):
a : Union[str, Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase)
elif os.path.isfile(__UpperCAmelCase) or is_remote_url(__UpperCAmelCase):
a : List[Any] = pretrained_model_name_or_path
else:
a : int = hf_bucket_url(__UpperCAmelCase , filename=__UpperCAmelCase , use_cdn=__UpperCAmelCase)
try:
# Load from URL or cache if already cached
a : Optional[Any] = cached_path(
__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , local_files_only=__UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
a : Union[str, Any] = Config.load_yaml(__UpperCAmelCase)
except EnvironmentError:
a : str = "Can't load config for"
raise EnvironmentError(__UpperCAmelCase)
if resolved_config_file == config_file:
print("loading configuration file from path")
else:
print("loading configuration file cache")
return Config.load_yaml(__UpperCAmelCase), kwargs
def lowercase ( A_ )-> str:
'''simple docstring'''
a : Tuple = torch.load("dump.pt" , map_location=in_tensor.device )
a : Any = in_tensor.numpy()
a : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(A_ , A_ , rtol=0.0_1 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(A_ , A_ , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowercase ( A_ )-> Optional[Any]:
'''simple docstring'''
a : Optional[Any] = urlparse(A_ )
return parsed.scheme in ("http", "https")
def lowercase ( A_ , A_ , A_=True )-> str:
'''simple docstring'''
a : List[Any] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
a : str = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowercase ( A_ , A_ , A_=None , A_=0 , A_=None , )-> List[str]:
'''simple docstring'''
a : Optional[int] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(A_ , A_ ):
ua += "; " + "; ".join("{}/{}".format(A_ , A_ ) for k, v in user_agent.items() )
elif isinstance(A_ , A_ ):
ua += "; " + user_agent
a : str = {"user-agent": ua}
if resume_size > 0:
a : List[Any] = "bytes=%d-" % (resume_size,)
a : str = requests.get(A_ , stream=A_ , proxies=A_ , headers=A_ )
if response.status_code == 416: # Range not satisfiable
return
a : Optional[int] = response.headers.get("Content-Length" )
a : List[Any] = resume_size + int(A_ ) if content_length is not None else None
a : List[Any] = tqdm(
unit="B" , unit_scale=A_ , total=A_ , initial=A_ , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(A_ ) )
temp_file.write(A_ )
progress.close()
def lowercase ( A_ , A_=None , A_=False , A_=None , A_=10 , A_=False , A_=None , A_=False , )-> str:
'''simple docstring'''
if cache_dir is None:
a : List[Any] = TRANSFORMERS_CACHE
if isinstance(A_ , A_ ):
a : Tuple = str(A_ )
os.makedirs(A_ , exist_ok=A_ )
a : Optional[Any] = None
if not local_files_only:
try:
a : Dict = requests.head(A_ , allow_redirects=A_ , proxies=A_ , timeout=A_ )
if response.status_code == 200:
a : int = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
a : List[str] = url_to_filename(A_ , A_ )
# get cache path to put the file
a : List[str] = os.path.join(A_ , A_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(A_ ):
return cache_path
else:
a : Any = [
file
for file in fnmatch.filter(os.listdir(A_ ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(A_ ) > 0:
return os.path.join(A_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(A_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
a : Dict = cache_path + ".lock"
with FileLock(A_ ):
# If the download just completed while the lock was activated.
if os.path.exists(A_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
a : Optional[Any] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(A_ , "a+b" ) as f:
yield f
a : Tuple = _resumable_file_manager
if os.path.exists(A_ ):
a : Optional[Any] = os.stat(A_ ).st_size
else:
a : Optional[int] = 0
else:
a : Union[str, Any] = partial(tempfile.NamedTemporaryFile , dir=A_ , delete=A_ )
a : Dict = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" , A_ , temp_file.name , )
http_get(
A_ , A_ , proxies=A_ , resume_size=A_ , user_agent=A_ , )
os.replace(temp_file.name , A_ )
a : List[str] = {"url": url, "etag": etag}
a : Tuple = cache_path + ".json"
with open(A_ , "w" ) as meta_file:
json.dump(A_ , A_ )
return cache_path
def lowercase ( A_ , A_=None )-> Any:
'''simple docstring'''
a : Dict = url.encode("utf-8" )
a : Optional[Any] = shaaaa(A_ )
a : Any = url_hash.hexdigest()
if etag:
a : Union[str, Any] = etag.encode("utf-8" )
a : Tuple = shaaaa(A_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
def lowercase ( A_ , A_=None , A_=False , A_=None , A_=False , A_=None , A_=False , A_=False , A_=False , )-> Tuple:
'''simple docstring'''
if cache_dir is None:
a : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(A_ , A_ ):
a : List[Any] = str(A_ )
if isinstance(A_ , A_ ):
a : int = str(A_ )
if is_remote_url(A_ ):
# URL, so get it from the cache (downloading if necessary)
a : Optional[Any] = get_from_cache(
A_ , cache_dir=A_ , force_download=A_ , proxies=A_ , resume_download=A_ , user_agent=A_ , local_files_only=A_ , )
elif os.path.exists(A_ ):
# File, and it exists.
a : Union[str, Any] = url_or_filename
elif urlparse(A_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(A_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(A_ ) )
if extract_compressed_file:
if not is_zipfile(A_ ) and not tarfile.is_tarfile(A_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
a , a : Dict = os.path.split(A_ )
a : List[str] = output_file.replace("." , "-" ) + "-extracted"
a : Optional[Any] = os.path.join(A_ , A_ )
if os.path.isdir(A_ ) and os.listdir(A_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
a : Tuple = output_path + ".lock"
with FileLock(A_ ):
shutil.rmtree(A_ , ignore_errors=A_ )
os.makedirs(A_ )
if is_zipfile(A_ ):
with ZipFile(A_ , "r" ) as zip_file:
zip_file.extractall(A_ )
zip_file.close()
elif tarfile.is_tarfile(A_ ):
a : List[str] = tarfile.open(A_ )
tar_file.extractall(A_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(A_ ) )
return output_path_extracted
return output_path
def lowercase ( A_ , A_="," )-> Union[str, Any]:
'''simple docstring'''
assert isinstance(A_ , A_ )
if os.path.isfile(A_ ):
with open(A_ ) as f:
a : str = eval(f.read() )
else:
a : List[Any] = requests.get(A_ )
try:
a : Any = requests.json()
except Exception:
a : Any = req.content.decode()
assert data is not None, "could not connect"
try:
a : Optional[Any] = eval(A_ )
except Exception:
a : Any = data.split("\n" )
req.close()
return data
def lowercase ( A_ )-> str:
'''simple docstring'''
a : Optional[int] = requests.get(A_ )
a : List[str] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( A_ )-> Any:
'''simple docstring'''
a : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(A_ )
with open(A_ , "rb" ) as stream:
a : Any = pkl.load(A_ )
a : List[str] = weights.pop("model" )
a : Dict = {}
for k, v in model.items():
a : List[str] = torch.from_numpy(A_ )
if "running_var" in k:
a : Dict = torch.tensor([0] )
a : Any = k.replace("running_var" , "num_batches_tracked" )
a : List[Any] = zero
return new
def lowercase ( )-> Optional[int]:
'''simple docstring'''
print(F'''{os.path.abspath(os.path.join(A_ , os.pardir ) )}/demo.ipynb''' )
def lowercase ( A_ , A_="RGB" )-> Any:
'''simple docstring'''
assert isinstance(A_ , A_ )
if os.path.isfile(A_ ):
a : Dict = cva.imread(A_ )
else:
a : Union[str, Any] = get_image_from_url(A_ )
assert img is not None, F'''could not connect to: {im}'''
a : int = cva.cvtColor(A_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
a : List[str] = img[:, :, ::-1]
return img
def lowercase ( A_ , A_=1 )-> int:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(A_ ) , A_ ))
| 226
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase__ :List[str] = datasets.utils.logging.get_logger(__name__)
lowercase__ :Tuple = ["names", "prefix"]
lowercase__ :List[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
lowercase__ :Any = ["encoding_errors", "on_bad_lines"]
lowercase__ :Optional[int] = ["date_format"]
@dataclass
class lowercase ( datasets.BuilderConfig ):
lowercase_ : str =","
lowercase_ : Optional[str] =None
lowercase_ : Optional[Union[int, List[int], str]] ="infer"
lowercase_ : Optional[List[str]] =None
lowercase_ : Optional[List[str]] =None
lowercase_ : Optional[Union[int, str, List[int], List[str]]] =None
lowercase_ : Optional[Union[List[int], List[str]]] =None
lowercase_ : Optional[str] =None
lowercase_ : bool =True
lowercase_ : Optional[Literal["c", "python", "pyarrow"]] =None
lowercase_ : Dict[Union[int, str], Callable[[Any], Any]] =None
lowercase_ : Optional[list] =None
lowercase_ : Optional[list] =None
lowercase_ : bool =False
lowercase_ : Optional[Union[int, List[int]]] =None
lowercase_ : Optional[int] =None
lowercase_ : Optional[Union[str, List[str]]] =None
lowercase_ : bool =True
lowercase_ : bool =True
lowercase_ : bool =False
lowercase_ : bool =True
lowercase_ : Optional[str] =None
lowercase_ : str ="."
lowercase_ : Optional[str] =None
lowercase_ : str ='"'
lowercase_ : int =0
lowercase_ : Optional[str] =None
lowercase_ : Optional[str] =None
lowercase_ : Optional[str] =None
lowercase_ : Optional[str] =None
lowercase_ : bool =True
lowercase_ : bool =True
lowercase_ : int =0
lowercase_ : bool =True
lowercase_ : bool =False
lowercase_ : Optional[str] =None
lowercase_ : int =10000
lowercase_ : Optional[datasets.Features] =None
lowercase_ : Optional[str] ="strict"
lowercase_ : Literal["error", "warn", "skip"] ="error"
lowercase_ : Optional[str] =None
def A__ ( self):
if self.delimiter is not None:
lowercase = self.delimiter
if self.column_names is not None:
lowercase = self.column_names
@property
def A__ ( self):
lowercase = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A__):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowercase ( datasets.ArrowBasedBuilder ):
lowercase_ : Optional[int] =CsvConfig
def A__ ( self):
return datasets.DatasetInfo(features=self.config.features)
def A__ ( self ,A__):
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
lowercase = dl_manager.download_and_extract(self.config.data_files)
if isinstance(A__ ,(str, list, tuple)):
lowercase = data_files
if isinstance(A__ ,A__):
lowercase = [files]
lowercase = [dl_manager.iter_files(A__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'''files''': files})]
lowercase = []
for split_name, files in data_files.items():
if isinstance(A__ ,A__):
lowercase = [files]
lowercase = [dl_manager.iter_files(A__) for file in files]
splits.append(datasets.SplitGenerator(name=A__ ,gen_kwargs={'''files''': files}))
return splits
def A__ ( self ,A__):
if self.config.features is not None:
lowercase = self.config.features.arrow_schema
if all(not require_storage_cast(A__) for feature in self.config.features.values()):
# cheaper cast
lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A__)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowercase = table_cast(A__ ,A__)
return pa_table
def A__ ( self ,A__):
lowercase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowercase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(A__) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(A__)):
lowercase = pd.read_csv(A__ ,iterator=A__ ,dtype=A__ ,**self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(A__):
lowercase = pa.Table.from_pandas(A__)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__)
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(A__)}: {e}')
raise
| 101
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Dict = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "camembert"
def __init__( self, lowerCamelCase__=3_0522, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : List[Any] = vocab_size
A : Dict = hidden_size
A : str = num_hidden_layers
A : List[Any] = num_attention_heads
A : List[str] = hidden_act
A : Tuple = intermediate_size
A : Tuple = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : Tuple = type_vocab_size
A : List[Any] = initializer_range
A : str = layer_norm_eps
A : Tuple = position_embedding_type
A : str = use_cache
A : Any = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
if self.task == "multiple-choice":
A : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 116
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: Any = "beit"
def __init__( self : int , _A : Optional[int]=8192 , _A : Optional[Any]=768 , _A : Dict=12 , _A : int=12 , _A : List[Any]=3072 , _A : Optional[int]="gelu" , _A : Optional[Any]=0.0 , _A : Union[str, Any]=0.0 , _A : Tuple=0.0_2 , _A : Optional[int]=1E-12 , _A : Union[str, Any]=224 , _A : int=16 , _A : str=3 , _A : Optional[Any]=False , _A : Any=False , _A : Optional[Any]=False , _A : List[str]=False , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : List[str]=True , _A : List[Any]=[3, 5, 7, 11] , _A : Any=[1, 2, 3, 6] , _A : Dict=True , _A : int=0.4 , _A : str=256 , _A : int=1 , _A : Dict=False , _A : Optional[Any]=255 , **_A : List[Any] , ) -> str:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : str = initializer_range
snake_case_ : Any = layer_norm_eps
snake_case_ : Tuple = image_size
snake_case_ : List[str] = patch_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[Any] = use_mask_token
snake_case_ : str = use_absolute_position_embeddings
snake_case_ : List[str] = use_relative_position_bias
snake_case_ : Optional[int] = use_shared_relative_position_bias
snake_case_ : List[str] = layer_scale_init_value
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ : Optional[Any] = out_indices
snake_case_ : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ : Tuple = use_auxiliary_head
snake_case_ : str = auxiliary_loss_weight
snake_case_ : Union[str, Any] = auxiliary_channels
snake_case_ : List[Any] = auxiliary_num_convs
snake_case_ : Tuple = auxiliary_concat_input
snake_case_ : int = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: str = version.parse("1.11" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> float:
"""simple docstring"""
return 1E-4
| 371
|
def SCREAMING_SNAKE_CASE__ ( __a = 60_08_51_47_51_43 ):
try:
snake_case_ : Optional[Any] = int(__a )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
snake_case_ : Any = 2
snake_case_ : Any = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
snake_case_ : Tuple = i
while n % i == 0:
snake_case_ : List[str] = n // i
i += 1
return int(__a )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 88
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase : Tuple = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
|
import os
import string
import sys
__UpperCamelCase : List[Any] = 1 << 8
__UpperCamelCase : Union[str, Any] = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__UpperCamelCase : Optional[Any] = KEYMAP["""up"""]
__UpperCamelCase : Tuple = KEYMAP["""left"""]
if sys.platform == "win32":
__UpperCamelCase : List[Any] = []
__UpperCamelCase : int = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
__UpperCamelCase : List[str] = ord(str(i))
def a_ ( ) -> Optional[int]:
"""simple docstring"""
if os.name == "nt":
import msvcrt
snake_case__ = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
snake_case__ = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case__ = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case__ = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
snake_case__ = chr(KEYMAP['esc'] )
except KeyError:
snake_case__ = cha[1]
else:
snake_case__ = ch.decode(_A )
else:
snake_case__ = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case__ = sys.stdin.fileno()
snake_case__ = termios.tcgetattr(_A )
try:
tty.setraw(_A )
snake_case__ = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
snake_case__ = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
snake_case__ = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 307
| 1
|
"""simple docstring"""
import copy
import re
class lowerCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = """hp"""
SCREAMING_SNAKE_CASE_ : List[str] = {}
SCREAMING_SNAKE_CASE_ : Tuple = None
@classmethod
def __A ( cls , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
SCREAMING_SNAKE_CASE = prefix
SCREAMING_SNAKE_CASE = defaults
cls.build_naming_info()
@staticmethod
def __A ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
if len(lowerCAmelCase__ ) == 0:
return ""
SCREAMING_SNAKE_CASE = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCAmelCase__ ) + 1 ):
SCREAMING_SNAKE_CASE = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = ''
while integer != 0:
SCREAMING_SNAKE_CASE = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
SCREAMING_SNAKE_CASE = 0
while True:
SCREAMING_SNAKE_CASE = word + '#' + int_to_alphabetic(lowerCAmelCase__ )
if sword in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE = sword
break
SCREAMING_SNAKE_CASE = short_word
SCREAMING_SNAKE_CASE = word
return short_word
@staticmethod
def __A ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
SCREAMING_SNAKE_CASE = param_name.split('_' )
SCREAMING_SNAKE_CASE = [TrialShortNamer.shortname_for_word(lowerCAmelCase__ , lowerCAmelCase__ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
SCREAMING_SNAKE_CASE = ['', '_']
for separator in separators:
SCREAMING_SNAKE_CASE = separator.join(lowerCAmelCase__ )
if shortname not in info["reverse_short_param"]:
SCREAMING_SNAKE_CASE = shortname
SCREAMING_SNAKE_CASE = param_name
return shortname
return param_name
@staticmethod
def __A ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
SCREAMING_SNAKE_CASE = TrialShortNamer.shortname_for_key(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = short_name
SCREAMING_SNAKE_CASE = param_name
@classmethod
def __A ( cls ) -> List[str]:
if cls.NAMING_INFO is not None:
return
SCREAMING_SNAKE_CASE = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
SCREAMING_SNAKE_CASE = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = info
@classmethod
def __A ( cls , lowerCAmelCase__ ) -> Dict:
cls.build_naming_info()
assert cls.PREFIX is not None
SCREAMING_SNAKE_CASE = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
SCREAMING_SNAKE_CASE = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = 1 if v else 0
SCREAMING_SNAKE_CASE = '' if isinstance(lowerCAmelCase__ , (int, float) ) else '-'
SCREAMING_SNAKE_CASE = F'{key}{sep}{v}'
name.append(lowerCAmelCase__ )
return "_".join(lowerCAmelCase__ )
@classmethod
def __A ( cls , lowerCAmelCase__ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
SCREAMING_SNAKE_CASE = []
else:
SCREAMING_SNAKE_CASE = repr.split('_' )
SCREAMING_SNAKE_CASE = {}
for value in values:
if "-" in value:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = value.split('-' )
else:
SCREAMING_SNAKE_CASE = re.sub('[0-9.]' , '' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = float(re.sub('[^0-9.]' , '' , lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = cls.NAMING_INFO['reverse_short_param'][p_k]
SCREAMING_SNAKE_CASE = p_v
for k in cls.DEFAULTS:
if k not in parameters:
SCREAMING_SNAKE_CASE = cls.DEFAULTS[k]
return parameters
| 38
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = DebertaTokenizer
_a = True
_a = DebertaTokenizerFast
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
_lowercase =dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
_lowercase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowercase ={'unk_token': '[UNK]'}
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
def A__ ( self , **lowerCAmelCase ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ) -> Any:
'''simple docstring'''
_lowercase ='lower newer'
_lowercase ='lower newer'
return input_text, output_text
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =self.get_tokenizer()
_lowercase ='lower newer'
_lowercase =['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowercase =tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
_lowercase =tokens + [tokenizer.unk_token]
_lowercase =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =self.get_tokenizer()
_lowercase =tokenizer('Hello' , 'World' )
_lowercase =[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , lowerCAmelCase )
@slow
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
_lowercase =tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase )
_lowercase =tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase )
_lowercase =tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
_lowercase =tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
_lowercase =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
_lowercase =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =[self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_lowercase =tokenizer_class.from_pretrained('microsoft/deberta-base' )
_lowercase =[
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
_lowercase =tokenizer(lowerCAmelCase , padding=lowerCAmelCase )
_lowercase =[tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) for seq in encoding['input_ids']]
# fmt: off
_lowercase ={
'input_ids': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_lowercase =[
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , lowerCAmelCase )
for expected, decoded in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 205
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 205
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''fnet'''
def __init__( self : Tuple , UpperCAmelCase_ : str=32_000 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : List[str]="gelu_new" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=1e-12 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : List[Any]=2 , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
__UpperCAmelCase : List[Any] = tpu_short_seq_length
| 361
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = grid.shape
__UpperCAmelCase : List[str] = [-1, 1, 0, 0]
__UpperCAmelCase : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__UpperCAmelCase , __UpperCAmelCase : Tuple = [(0, source)], set()
__UpperCAmelCase : Any = np.full((rows, cols), np.inf )
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Union[str, Any] = np.empty((rows, cols), dtype=_UpperCAmelCase )
__UpperCAmelCase : Any = None
while queue:
((__UpperCAmelCase) , (__UpperCAmelCase)) : Optional[Any] = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__UpperCAmelCase : int = []
while (x, y) != source:
path.append((x, y) )
__UpperCAmelCase , __UpperCAmelCase : Tuple = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
__UpperCAmelCase , __UpperCAmelCase : int = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__UpperCAmelCase : Optional[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase, (dist + 1, (nx, ny)) )
__UpperCAmelCase : List[str] = dist + 1
__UpperCAmelCase : int = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
| 0
|
import numpy as np
def __lowercase ( __lowerCAmelCase : Any ):
return 1 / (1 + np.exp(-vector ))
def __lowercase ( __lowerCAmelCase : List[str] ):
return vector * sigmoid(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 240
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : List[str] =XLMProphetNetTokenizer
A__ : List[Any] =False
A__ : Tuple =True
def A_ ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ = XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = '[PAD]'
SCREAMING_SNAKE_CASE__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(UpperCAmelCase_ ) , 1012 )
def A_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = XLMProphetNetTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def A_ ( self : Optional[Any] ):
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = 'Hello World!'
SCREAMING_SNAKE_CASE__ = [35389, 6672, 49, 2]
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) )
@slow
def A_ ( self : Tuple ):
# fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 176
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : List[Any] = AudioLDMPipeline
_lowerCAmelCase : List[str] = TEXT_TO_AUDIO_PARAMS
_lowerCAmelCase : List[Any] = TEXT_TO_AUDIO_BATCH_PARAMS
_lowerCAmelCase : Tuple = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def _snake_case ( self : List[Any] ):
torch.manual_seed(0 )
snake_case_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowercase_ , )
snake_case_ : int = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ : List[str] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
snake_case_ : Union[str, Any] = ClapTextModelWithProjection(lowercase_ )
snake_case_ : Union[str, Any] = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
snake_case_ : Any = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowercase_ , )
snake_case_ : Optional[int] = SpeechTaHifiGan(lowercase_ )
snake_case_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def _snake_case ( self : int , lowercase_ : Any , lowercase_ : str=0 ):
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ : Any = torch.manual_seed(lowercase_ )
else:
snake_case_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ : Tuple = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def _snake_case ( self : List[Any] ):
snake_case_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Optional[Any] = self.get_dummy_components()
snake_case_ : Union[str, Any] = AudioLDMPipeline(**lowercase_ )
snake_case_ : Optional[int] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Optional[int] = self.get_dummy_inputs(lowercase_ )
snake_case_ : Dict = audioldm_pipe(**lowercase_ )
snake_case_ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 256
snake_case_ : List[str] = audio[:10]
snake_case_ : Any = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _snake_case ( self : int ):
snake_case_ : List[Any] = self.get_dummy_components()
snake_case_ : Dict = AudioLDMPipeline(**lowercase_ )
snake_case_ : Optional[int] = audioldm_pipe.to(lowercase_ )
snake_case_ : int = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Any = self.get_dummy_inputs(lowercase_ )
snake_case_ : Optional[int] = 3 * [inputs['''prompt''']]
# forward
snake_case_ : Union[str, Any] = audioldm_pipe(**lowercase_ )
snake_case_ : Optional[int] = output.audios[0]
snake_case_ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
snake_case_ : str = 3 * [inputs.pop('''prompt''' )]
snake_case_ : Dict = audioldm_pipe.tokenizer(
lowercase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase_ , return_tensors='''pt''' , )
snake_case_ : Union[str, Any] = text_inputs['''input_ids'''].to(lowercase_ )
snake_case_ : Optional[Any] = audioldm_pipe.text_encoder(
lowercase_ , )
snake_case_ : Dict = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
snake_case_ : Union[str, Any] = F.normalize(lowercase_ , dim=-1 )
snake_case_ : List[Any] = prompt_embeds
# forward
snake_case_ : List[str] = audioldm_pipe(**lowercase_ )
snake_case_ : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _snake_case ( self : Any ):
snake_case_ : Optional[Any] = self.get_dummy_components()
snake_case_ : Optional[Any] = AudioLDMPipeline(**lowercase_ )
snake_case_ : int = audioldm_pipe.to(lowercase_ )
snake_case_ : Union[str, Any] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Any = self.get_dummy_inputs(lowercase_ )
snake_case_ : List[Any] = 3 * ['''this is a negative prompt''']
snake_case_ : str = negative_prompt
snake_case_ : Any = 3 * [inputs['''prompt''']]
# forward
snake_case_ : List[str] = audioldm_pipe(**lowercase_ )
snake_case_ : Optional[Any] = output.audios[0]
snake_case_ : Optional[Any] = self.get_dummy_inputs(lowercase_ )
snake_case_ : List[Any] = 3 * [inputs.pop('''prompt''' )]
snake_case_ : Dict = []
for p in [prompt, negative_prompt]:
snake_case_ : List[str] = audioldm_pipe.tokenizer(
lowercase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase_ , return_tensors='''pt''' , )
snake_case_ : Optional[Any] = text_inputs['''input_ids'''].to(lowercase_ )
snake_case_ : str = audioldm_pipe.text_encoder(
lowercase_ , )
snake_case_ : Optional[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
snake_case_ : Optional[Any] = F.normalize(lowercase_ , dim=-1 )
embeds.append(lowercase_ )
snake_case_ : List[str] = embeds
# forward
snake_case_ : Optional[Any] = audioldm_pipe(**lowercase_ )
snake_case_ : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Dict = self.get_dummy_components()
snake_case_ : str = PNDMScheduler(skip_prk_steps=lowercase_ )
snake_case_ : str = AudioLDMPipeline(**lowercase_ )
snake_case_ : Union[str, Any] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Any = self.get_dummy_inputs(lowercase_ )
snake_case_ : Optional[Any] = '''egg cracking'''
snake_case_ : Tuple = audioldm_pipe(**lowercase_ , negative_prompt=lowercase_ )
snake_case_ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 256
snake_case_ : Union[str, Any] = audio[:10]
snake_case_ : List[Any] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Union[str, Any] = self.get_dummy_components()
snake_case_ : str = PNDMScheduler(skip_prk_steps=lowercase_ )
snake_case_ : Any = AudioLDMPipeline(**lowercase_ )
snake_case_ : List[str] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : List[Any] = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
snake_case_ : List[str] = audioldm_pipe(lowercase_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
snake_case_ : List[str] = 2
snake_case_ : Dict = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
snake_case_ : Union[str, Any] = 2
snake_case_ : Dict = audioldm_pipe(lowercase_ , num_inference_steps=2 , num_waveforms_per_prompt=lowercase_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
snake_case_ : Union[str, Any] = 2
snake_case_ : Dict = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowercase_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _snake_case ( self : str ):
snake_case_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Tuple = self.get_dummy_components()
snake_case_ : Optional[int] = AudioLDMPipeline(**lowercase_ )
snake_case_ : Union[str, Any] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : int = audioldm_pipe.vocoder.config.sampling_rate
snake_case_ : Dict = self.get_dummy_inputs(lowercase_ )
snake_case_ : Optional[Any] = audioldm_pipe(audio_length_in_s=0.0_16 , **lowercase_ )
snake_case_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) / vocoder_sampling_rate == 0.0_16
snake_case_ : str = audioldm_pipe(audio_length_in_s=0.0_32 , **lowercase_ )
snake_case_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) / vocoder_sampling_rate == 0.0_32
def _snake_case ( self : str ):
snake_case_ : int = self.get_dummy_components()
snake_case_ : Union[str, Any] = AudioLDMPipeline(**lowercase_ )
snake_case_ : List[str] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Optional[int] = ['''hey''']
snake_case_ : List[Any] = audioldm_pipe(lowercase_ , num_inference_steps=1 )
snake_case_ : Dict = output.audios.shape
assert audio_shape == (1, 256)
snake_case_ : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
snake_case_ : List[Any] = SpeechTaHifiGan(lowercase_ ).to(lowercase_ )
snake_case_ : Optional[Any] = audioldm_pipe(lowercase_ , num_inference_steps=1 )
snake_case_ : Any = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _snake_case ( self : int ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase_ )
def _snake_case ( self : Dict ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowercase_ )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_ )
@slow
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict="cpu" , lowercase_ : int=torch.floataa , lowercase_ : Optional[Any]=0 ):
snake_case_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ : Optional[int] = np.random.RandomState(lowercase_ ).standard_normal((1, 8, 128, 16) )
snake_case_ : Dict = torch.from_numpy(lowercase_ ).to(device=lowercase_ , dtype=lowercase_ )
snake_case_ : int = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def _snake_case ( self : Any ):
snake_case_ : List[str] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
snake_case_ : List[str] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Optional[int] = self.get_inputs(lowercase_ )
snake_case_ : List[Any] = 25
snake_case_ : Any = audioldm_pipe(**lowercase_ ).audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 81920
snake_case_ : int = audio[77230:77240]
snake_case_ : Any = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
snake_case_ : str = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _snake_case ( self : Dict ):
snake_case_ : str = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
snake_case_ : Tuple = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
snake_case_ : Tuple = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : Tuple = self.get_inputs(lowercase_ )
snake_case_ : List[str] = audioldm_pipe(**lowercase_ ).audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 81920
snake_case_ : List[Any] = audio[27780:27790]
snake_case_ : str = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
snake_case_ : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 357
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __lowercase ( _a = 3 ):
if isinstance(_a , _a ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_a ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
snake_case_ : Tuple = QuantumRegister(_a , '''qr''' )
snake_case_ : Optional[Any] = ClassicalRegister(_a , '''cr''' )
snake_case_ : Any = QuantumCircuit(_a , _a )
snake_case_ : int = number_of_qubits
for i in range(_a ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_a ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _a , _a )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_a , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_a , _a )
# simulate with 10000 shots
snake_case_ : Any = Aer.get_backend('''qasm_simulator''' )
snake_case_ : Optional[int] = execute(_a , _a , shots=10_000 )
return job.result().get_counts(_a )
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 155
| 0
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :int = XCLIPTextConfig()
# derive patch size from model name
snake_case_ :Union[str, Any] = model_name.find("""patch""" )
snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase )
if "large" in model_name:
snake_case_ :Optional[Any] = 768
snake_case_ :Union[str, Any] = 3072
snake_case_ :Any = 12
snake_case_ :Any = 1024
snake_case_ :str = 4096
snake_case_ :Union[str, Any] = 16
snake_case_ :Union[str, Any] = 24
snake_case_ :Tuple = 768
snake_case_ :Any = 3072
if model_name == "xclip-large-patch14-16-frames":
snake_case_ :Any = 336
snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase )
if "large" in model_name:
snake_case_ :List[Any] = 768
return config
def A_ ( _lowercase ):
'''simple docstring'''
if name == "token_embedding.weight":
snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
snake_case_ :str = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
snake_case_ :int = name.replace("""c_proj""", """fc2""" )
if name.startswith("""transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
if "ln_final" in name:
snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
if "visual.conv1" in name:
snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
if "visual.proj" in name:
snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" )
if "text_projection" in name:
snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
snake_case_ :str = name.replace("""positional""", """position""" )
if name.startswith("""mit.resblocks""" ):
snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
return name
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ :Dict = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
snake_case_ :Optional[Any] = key.split(""".""" )
if key.startswith("""visual""" ):
snake_case_ :Any = key_split[3]
snake_case_ :Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ :Tuple = key_split[2]
snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
snake_case_ :Tuple = key_split[2]
snake_case_ :Any = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
snake_case_ :Optional[int] = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ :Optional[Any] = val.T
snake_case_ :Tuple = val
return orig_state_dict
def A_ ( _lowercase ):
'''simple docstring'''
if num_frames == 8:
snake_case_ :str = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
snake_case_ :int = """eating_spaghetti.npy"""
elif num_frames == 32:
snake_case_ :List[str] = """eating_spaghetti_32_frames.npy"""
snake_case_ :int = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", )
snake_case_ :Union[str, Any] = np.load(_lowercase )
return list(_lowercase )
def A_ ( _lowercase, _lowercase=None, _lowercase=False ):
'''simple docstring'''
snake_case_ :List[Any] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
snake_case_ :Optional[int] = model_to_url[model_name]
snake_case_ :int = 8
if "16-frames" in model_name:
snake_case_ :List[Any] = 16
elif "shot" in model_name:
snake_case_ :Dict = 32
snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase )
snake_case_ :Optional[Any] = XCLIPModel(_lowercase )
model.eval()
if "drive" in checkpoint_url:
snake_case_ :List[str] = """pytorch_model.bin"""
gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase )
snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""]
else:
snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""]
snake_case_ :Union[str, Any] = convert_state_dict(_lowercase, _lowercase )
snake_case_ :str = XCLIPModel(_lowercase )
snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase )
snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase )
snake_case_ :Optional[int] = prepare_video(_lowercase )
snake_case_ :Optional[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase )
print("""Shape of pixel values:""", inputs.pixel_values.shape )
with torch.no_grad():
snake_case_ :List[Any] = model(**_lowercase )
# Verify outputs
snake_case_ :List[Any] = outputs.logits_per_video
snake_case_ :Any = logits_per_video.softmax(dim=1 )
print("""Probs:""", _lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowercase, organization="""nielsr""" )
processor.push_to_hub(_lowercase, organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66
|
def __lowerCAmelCase ( a__ , a__ ) -> float:
def get_matched_characters(a__ , a__ ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a__ )
__a = F"""{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}"""
return "".join(a__ )
# matching characters
__a = get_matched_characters(a__ , a__ )
__a = get_matched_characters(a__ , a__ )
__a = len(a__ )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(a__ , a__ ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(a__ )
+ match_count / len(a__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 6
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_lowerCamelCase : Optional[Any] = TypeVar('''T''')
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Dict , lowercase : list[T] , lowercase : Callable[[T, T], T] ):
'''simple docstring'''
_snake_case = None
_snake_case = len(lowercase )
_snake_case = [any_type for _ in range(self.N )] + arr
_snake_case = fnc
self.build()
def A ( self : List[str] ):
'''simple docstring'''
for p in range(self.N - 1 , 0 , -1 ):
_snake_case = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def A ( self : Union[str, Any] , lowercase : int , lowercase : T ):
'''simple docstring'''
p += self.N
_snake_case = v
while p > 1:
_snake_case = p // 2
_snake_case = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def A ( self : List[str] , lowercase : int , lowercase : int ): # noqa: E741
'''simple docstring'''
_snake_case , _snake_case = l + self.N, r + self.N
_snake_case = None
while l <= r:
if l % 2 == 1:
_snake_case = self.st[l] if res is None else self.fn(lowercase , self.st[l] )
if r % 2 == 0:
_snake_case = self.st[r] if res is None else self.fn(lowercase , self.st[r] )
_snake_case , _snake_case = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
_lowerCamelCase : Optional[int] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
_lowerCamelCase : Tuple = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
_lowerCamelCase : int = SegmentTree(test_array, min)
_lowerCamelCase : Optional[Any] = SegmentTree(test_array, max)
_lowerCamelCase : int = SegmentTree(test_array, lambda a, b: a + b)
def a_ ( ) -> None:
for i in range(len(__lowercase ) ):
for j in range(__lowercase , len(__lowercase ) ):
_snake_case = reduce(__lowercase , test_array[i : j + 1] )
_snake_case = reduce(__lowercase , test_array[i : j + 1] )
_snake_case = reduce(lambda __lowercase , __lowercase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__lowercase , __lowercase )
assert max_range == max_segment_tree.query(__lowercase , __lowercase )
assert sum_range == sum_segment_tree.query(__lowercase , __lowercase )
test_all_segments()
for index, value in test_updates.items():
_lowerCamelCase : Dict = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 130
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a_ ( __lowercase : Dict ) -> int:
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a_ ( __lowercase : List[str] ) -> str:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_snake_case = emb.weight.data
return lin_layer
def a_ ( __lowercase : Optional[int] , __lowercase : Optional[Any]="facebook/mbart-large-en-ro" , __lowercase : Union[str, Any]=False , __lowercase : Optional[Any]=False ) -> int:
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
remove_ignore_keys_(__lowercase )
_snake_case = state_dict['encoder.embed_tokens.weight'].shape[0]
_snake_case = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_snake_case = 'relu'
_snake_case = state_dict['decoder.embed_tokens.weight']
_snake_case = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_snake_case = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowerCamelCase : List[str] = parser.parse_args()
_lowerCamelCase : Tuple = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 130
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.